Unverified commit 6d0f730d, authored by Leo Chen, committed by GitHub

clean unused code: save_load_util.cc/.h (#47588)

Parent 9adad42d
...
@@ -1166,19 +1166,6 @@ cc_test_old(
   string_helper
   glog)
-cc_library(
-  save_load_util
-  SRCS save_load_util.cc
-  DEPS tensor scope layer)
-cc_test_old(
-  save_load_util_test
-  SRCS
-  save_load_util_test.cc
-  DEPS
-  save_load_util
-  tensor
-  scope
-  layer)
 cc_library(
   generator
   SRCS generator.cc
...
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/save_load_util.h"
#include <fstream>
#include "paddle/fluid/framework/convert_utils.h"
#include "paddle/fluid/imperative/layer.h"
namespace paddle {
namespace framework {
const int model_file_reserve_size = 256;
const std::string tensor_number_mark = "TNUM"; // NOLINT
const std::string tensor_name_mark = "NAME"; // NOLINT
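
// On-disk layout produced below: a 256-byte reserved header, the "TNUM"
// marker followed by a size_t tensor count, then for each tensor: the
// "NAME" marker, the name length and bytes, a uint32_t version (only 0 is
// supported), a TensorDesc record (int32_t size + protobuf bytes), and
// finally the raw tensor data.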
void CheckInStreamState(std::istream& istre, size_t length) {
if (!istre) {
VLOG(5) << "Can't read [" << length << "] from file"
<< "file seems breakem";
PADDLE_THROW(platform::errors::Unavailable(
"Model load failed, istream state error."));
}
}
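
// Used with framework::VisitDataType to allocate the tensor's buffer with
// the element type recorded in its TensorDesc, returning the raw pointer
// through buf_.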
struct DeserializedDataFunctor {
DeserializedDataFunctor(void** buf,
phi::DenseTensor* tensor,
const platform::Place& place)
: buf_(buf), tensor_(tensor), place_(place) {}
template <typename T>
void apply() {
*buf_ = tensor_->mutable_data<T>(place_);
}
void** buf_;
phi::DenseTensor* tensor_;
platform::Place place_;
};
size_t ReadTensorNumber(std::istream& istre) {
char* tensor_number_mark_buffer = new char[tensor_number_mark.size()];
istre.read(tensor_number_mark_buffer,
sizeof(char) * tensor_number_mark.size());
CheckInStreamState(istre, sizeof(char) * tensor_number_mark.size());
std::string str_read_tensor_number_mark(tensor_number_mark_buffer,
tensor_number_mark.size());
PADDLE_ENFORCE_EQ(
tensor_number_mark,
str_read_tensor_number_mark,
platform::errors::InvalidArgument(
"phi::DenseTensor number mark does not match, expect mark is "
"[%s], but the mark read from file is [%s].",
tensor_number_mark,
str_read_tensor_number_mark));
size_t tensor_number = 0;
istre.read(reinterpret_cast<char*>(&tensor_number), sizeof(tensor_number));
CheckInStreamState(istre, sizeof(tensor_number));
delete[] tensor_number_mark_buffer;
return tensor_number;
}
std::string ReadTensorName(std::istream& istre) {
char* name_mark_buffer = new char[tensor_name_mark.size()];
istre.read(name_mark_buffer, sizeof(char) * tensor_name_mark.size());
CheckInStreamState(istre, sizeof(char) * tensor_name_mark.size());
std::string str_read_tensor_name_mark(name_mark_buffer,
tensor_name_mark.size());
PADDLE_ENFORCE_EQ(
tensor_name_mark,
str_read_tensor_name_mark,
platform::errors::InvalidArgument(
"phi::DenseTensor name mark does not match, expect mark is [%s], "
"but the mark read from file is [%s].",
tensor_name_mark,
str_read_tensor_name_mark));
size_t tensor_name_length = 0;
istre.read(reinterpret_cast<char*>(&tensor_name_length),
sizeof(tensor_name_length));
CheckInStreamState(istre, sizeof(tensor_name_length));
char* tensor_name_buffer = new char[tensor_name_length];
istre.read(tensor_name_buffer, sizeof(char) * tensor_name_length);
CheckInStreamState(istre, sizeof(char) * tensor_name_length);
std::string str_tensor_name(tensor_name_buffer, tensor_name_length);
delete[] name_mark_buffer;
delete[] tensor_name_buffer;
return str_tensor_name;
}
void ReadReserveBuffer(std::istream& istre) {
char* reserve_buffer = new char[model_file_reserve_size];
istre.read(reserve_buffer, sizeof(char) * model_file_reserve_size);
CheckInStreamState(istre, model_file_reserve_size);
delete[] reserve_buffer;
}
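
// Looks up each named variable in the scope, checks that it holds an
// initialized phi::DenseTensor, and delegates to SaveTensorToDisk.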
bool SaveStaticNameListToDisk(
const std::string& file_name,
const std::vector<std::string>& vec_tensor_name_list,
const Scope& scope) {
std::map<std::string, phi::DenseTensor*> map_tensor;
for (size_t i = 0; i < vec_tensor_name_list.size(); ++i) {
auto var_ptr = scope.FindVar(vec_tensor_name_list[i]);
PADDLE_ENFORCE_NOT_NULL(
var_ptr,
platform::errors::NotFound("Variable (%s) is not found when "
"saving model, please make sure "
"that exe.run(startup_program) has "
"been executed.",
vec_tensor_name_list[i]));
phi::DenseTensor* tensor = var_ptr->GetMutable<phi::DenseTensor>();
PADDLE_ENFORCE_EQ(tensor->IsInitialized(),
true,
platform::errors::PreconditionNotMet(
"Paramter [%s] is not initialzed, please make sure "
"that exe.run(startup_program) has been executed.",
vec_tensor_name_list[i]));
map_tensor[vec_tensor_name_list[i]] = tensor;
}
return SaveTensorToDisk(file_name, map_tensor);
}
bool SaveDygraphVarBaseListToDisk(
const std::string& file_name,
const std::vector<std::shared_ptr<imperative::VarBase>>&
vec_var_base_list) {
std::map<std::string, phi::DenseTensor*> map_tensor;
for (size_t i = 0; i < vec_var_base_list.size(); ++i) {
auto var_ptr = vec_var_base_list[i]->MutableVar();
phi::DenseTensor* tensor = var_ptr->GetMutable<phi::DenseTensor>();
PADDLE_ENFORCE_EQ(tensor->IsInitialized(),
true,
platform::errors::PreconditionNotMet(
"Paramter [%s] is not initialzed, please make sure "
"that exe.run(startup_program) has been executed.",
vec_var_base_list[i]->Name()));
map_tensor[vec_var_base_list[i]->Name()] = tensor;
}
return SaveTensorToDisk(file_name, map_tensor);
}
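
// Reads every tensor stored in the file and wraps each one in a fresh
// imperative::VarBase, copying the data with TensorCopySync.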
const std::vector<std::shared_ptr<imperative::VarBase>>
LoadDygraphVarBaseListFromDisk(const std::string& file_name) {
std::map<std::string, std::shared_ptr<phi::DenseTensor>> map_load_tensor;
LoadTensorFromDisk(file_name, &map_load_tensor);
std::vector<std::shared_ptr<imperative::VarBase>> vec_res;
vec_res.reserve(map_load_tensor.size());
for (auto& load_tensor : map_load_tensor) {
std::shared_ptr<imperative::VarBase> var(
new imperative::VarBase(load_tensor.first));
auto* tensor = var->MutableVar()->GetMutable<phi::DenseTensor>();
TensorCopySync(
*(load_tensor.second.get()), load_tensor.second->place(), tensor);
vec_res.emplace_back(var);
}
return vec_res;
}
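
// Loads the whole file into a temporary map, then copies each requested
// tensor into the matching pre-created variable in the scope, enforcing
// that names exist and shapes match; leftover tensors are reported below.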
bool LoadStaticNameListFromDisk(
const std::string& file_name,
const std::vector<std::string>& vec_tensor_name_list,
const Scope& scope) {
std::map<std::string, std::shared_ptr<phi::DenseTensor>> map_load_tensor;
LoadTensorFromDisk(file_name, &map_load_tensor);
for (size_t i = 0; i < vec_tensor_name_list.size(); ++i) {
auto it = map_load_tensor.find(vec_tensor_name_list[i]);
PADDLE_ENFORCE_NE(it,
map_load_tensor.end(),
platform::errors::NotFound(
"Parameter (%s) not found in model file (%s).",
vec_tensor_name_list[i],
file_name));
auto var_ptr = scope.FindVar(vec_tensor_name_list[i]);
PADDLE_ENFORCE_NOT_NULL(
var_ptr,
platform::errors::PreconditionNotMet(
"Parameter (%s) is not created when loading model, "
"please make sure that exe.run(startup_program) has been executed.",
vec_tensor_name_list[i]));
phi::DenseTensor* tensor = var_ptr->GetMutable<phi::DenseTensor>();
PADDLE_ENFORCE_NOT_NULL(
tensor,
platform::errors::PreconditionNotMet(
"Paramter [%s] is not initialzed, "
"please make sure that exe.run(startup_program) has been executed.",
vec_tensor_name_list[i]));
PADDLE_ENFORCE_EQ(tensor->IsInitialized(),
true,
platform::errors::PreconditionNotMet(
"Paramter [%s] is not initialzed, "
"please make sure that exe.run(startup_program) has "
"been executed.v",
vec_tensor_name_list[i]));
PADDLE_ENFORCE_EQ(
tensor->dims(),
it->second->dims(),
platform::errors::InvalidArgument(
"Shape does not match, the program requires a parameter with a "
"shape of "
"(%s), while the loaded parameter (namely [ %s ]) has a shape of "
"(%s).",
tensor->dims(),
vec_tensor_name_list[i],
it->second->dims()));
TensorCopySync(*(it->second.get()), tensor->place(), tensor);
map_load_tensor.erase(it);
}
if (map_load_tensor.size() > 0) {
std::string unused_tensor_message =
"There are [" + std::to_string(map_load_tensor.size()) +
"] tensors in the model file that were not used: ";
for (auto& tensor_temp : map_load_tensor) {
unused_tensor_message += " " + tensor_temp.first;
}
LOG(ERROR) << unused_tensor_message;
}
return true;
}
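
// Serializes the map into a single binary file using the layout described
// at the top of this file; GPU tensors are first staged through a CPU copy.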
bool SaveTensorToDisk(
const std::string& file_name,
const std::map<std::string, phi::DenseTensor*>& map_tensor) {
MkDirRecursively(DirName(file_name).c_str());
std::ofstream fout(file_name, std::ios::binary);
PADDLE_ENFORCE_EQ(
fout.is_open(),
true,
platform::errors::Unavailable("File (%s) open failed.", file_name));
// The first 256 bytes are reserved for future format upgrades;
// value-initialize the buffer so no uninitialized memory reaches disk.
char* reserve_buffer = new char[model_file_reserve_size]();
fout.write(reserve_buffer, sizeof(char) * model_file_reserve_size);
delete[] reserve_buffer;
fout.write(tensor_number_mark.c_str(),
sizeof(char) * tensor_number_mark.size());
size_t tensor_number = map_tensor.size();
fout.write(reinterpret_cast<const char*>(&tensor_number),
sizeof(tensor_number));
for (auto& itera : map_tensor) {
// first save tensor name
fout.write(tensor_name_mark.c_str(),
sizeof(char) * tensor_name_mark.size());
size_t name_length = itera.first.size();
fout.write(reinterpret_cast<const char*>(&name_length),
sizeof(name_length));
fout.write(itera.first.c_str(), sizeof(char) * name_length);
// write tensor version
constexpr uint32_t version = 0;
fout.write(reinterpret_cast<const char*>(&version), sizeof(version));
// the 2nd field, tensor description
// int32_t size
// void* protobuf message
auto tensor = itera.second;
proto::VarType::TensorDesc desc;
desc.set_data_type(framework::TransToProtoVarType(tensor->dtype()));
auto dims = phi::vectorize(tensor->dims());
auto* pb_dims = desc.mutable_dims();
pb_dims->Resize(static_cast<int>(dims.size()), 0);
std::copy(dims.begin(), dims.end(), pb_dims->begin());
int32_t size = desc.ByteSize();
fout.write(reinterpret_cast<const char*>(&size), sizeof(size));
auto out = desc.SerializeAsString();
fout.write(out.data(), size);
// save tensor
uint64_t data_size =
tensor->numel() * framework::DataTypeSize(tensor->dtype());
auto* data_ptr = tensor->data();
if (platform::is_gpu_place(tensor->place())) {
#if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP)
phi::DenseTensor temp;
TensorCopySync(*tensor, platform::CPUPlace(), &temp);
data_ptr = temp.data();
#else
PADDLE_THROW(
platform::errors::Unavailable("phi::DenseTensor is in CUDA device, "
"but paddle not compiled with CUDA."));
#endif
}
fout.write(static_cast<const char*>(data_ptr),
static_cast<std::streamsize>(data_size));
}
if (!fout) {
PADDLE_THROW(platform::errors::Unavailable(
"Model save failed, error when writing data into model file [%s].",
file_name));
}
fout.close();
return true;
}
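
// Inverse of SaveTensorToDisk: validates the per-tensor markers and version,
// then materializes every tensor on the CPU place.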
bool LoadTensorFromDisk(
const std::string& file_name,
std::map<std::string, std::shared_ptr<phi::DenseTensor>>* map_tensor) {
std::ifstream fin(file_name, std::ios::binary);
PADDLE_ENFORCE_EQ(
fin.is_open(),
true,
platform::errors::Unavailable("File (%s) open failed.", file_name));
ReadReserveBuffer(fin);
size_t tensor_number = ReadTensorNumber(fin);
for (size_t i = 0; i < tensor_number; ++i) {
std::string str_tensor_name = ReadTensorName(fin);
std::shared_ptr<phi::DenseTensor> tensor_temp(new phi::DenseTensor());
uint32_t version;
fin.read(reinterpret_cast<char*>(&version), sizeof(version));
CheckInStreamState(fin, sizeof(version));
PADDLE_ENFORCE_EQ(version,
0U,
platform::errors::InvalidArgument(
"Only version 0 tensor is supported."));
proto::VarType::TensorDesc desc;
{
// int32_t size
// proto buffer
int32_t size;
fin.read(reinterpret_cast<char*>(&size), sizeof(size));
CheckInStreamState(fin, sizeof(size));
std::unique_ptr<char[]> buf(new char[size]);
fin.read(reinterpret_cast<char*>(buf.get()), size);
CheckInStreamState(fin, size);
PADDLE_ENFORCE_EQ(
desc.ParseFromArray(buf.get(), size),
true,
platform::errors::InvalidArgument("Parse tensor desc failed."));
}
{ // read tensor
std::vector<int64_t> dims;
dims.reserve(static_cast<size_t>(desc.dims().size()));
std::copy(
desc.dims().begin(), desc.dims().end(), std::back_inserter(dims));
auto new_dim = phi::make_ddim(dims);
tensor_temp->Resize(new_dim);
void* buf;
framework::VisitDataType(
desc.data_type(),
DeserializedDataFunctor(
&buf, tensor_temp.get(), platform::CPUPlace()));
size_t size =
tensor_temp->numel() * framework::SizeOfType(desc.data_type());
fin.read(reinterpret_cast<char*>(buf), size);
CheckInStreamState(fin, size);
}
(*map_tensor)[str_tensor_name] = tensor_temp;
}
return true;
}
} // namespace framework
} // namespace paddle
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <fstream>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/imperative/type_defs.h"
namespace paddle {
namespace framework {
class Scope;
bool SaveStaticNameListToDisk(
const std::string& file_name,
const std::vector<std::string>& vec_tensor_name_list,
const Scope& scope);
bool LoadStaticNameListFromDisk(
const std::string& file_name,
const std::vector<std::string>& vec_tensor_name_list,
const Scope& scope);
bool SaveDygraphVarBaseListToDisk(
const std::string& file_name,
const std::vector<std::shared_ptr<imperative::VarBase>>& vec_var_base_list);
const std::vector<std::shared_ptr<imperative::VarBase>>
LoadDygraphVarBaseListFromDisk(const std::string& file_name);
bool SaveTensorToDisk(
const std::string& file_name,
const std::map<std::string, phi::DenseTensor*>& map_tensor);
bool LoadTensorFromDisk(
const std::string& file_name,
std::map<std::string, std::shared_ptr<phi::DenseTensor>>* map_tensor);
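
// Minimal round-trip sketch (mirrors save_load_util_test.cc; the names
// "model.bin", to_save, weight, and loaded are arbitrary examples):
//
// std::map<std::string, phi::DenseTensor*> to_save = {{"w", &weight}};
// SaveTensorToDisk("model.bin", to_save);
// std::map<std::string, std::shared_ptr<phi::DenseTensor>> loaded;
// LoadTensorFromDisk("model.bin", &loaded);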
} // namespace framework
} // namespace paddle
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/save_load_util.h"
#include <stdlib.h>
#include <time.h>
#include "gtest/gtest.h"
namespace paddle {
namespace framework {
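
// Round-trip test: fill two tensors with random floats, save them to disk
// under the names "t1" and "t2", reload, and require bit-identical data.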
TEST(test_save_load_util, test_save_load) {
srand(time(NULL));
auto cpu_place = platform::CPUPlace();
phi::DenseTensor tensor1;
tensor1.Resize({1000, 1000});
auto src_data_1 = tensor1.mutable_data<float>(cpu_place);
phi::DenseTensor tensor2;
tensor2.Resize({5000, 1000});
auto src_data_2 = tensor2.mutable_data<float>(cpu_place);
for (int64_t i = 0; i < tensor1.numel(); ++i) {
float temp = (rand() % 10000) * 1.0 / 50000 - 1.0; // NOLINT
src_data_1[i] = temp;
}
for (int64_t i = 0; i < tensor2.numel(); ++i) {
float temp = (rand() % 10000) * 1.0 / 50000 - 1.0; // NOLINT
src_data_2[i] = temp;
}
std::map<std::string, phi::DenseTensor*> map_tensor;
map_tensor["t1"] = &tensor1;
map_tensor["t2"] = &tensor2;
SaveTensorToDisk("test_1", map_tensor);
std::map<std::string, std::shared_ptr<phi::DenseTensor>> load_map_tensor;
LoadTensorFromDisk("test_1", &load_map_tensor);
ASSERT_TRUE(load_map_tensor.find("t1") != load_map_tensor.end());
ASSERT_TRUE(load_map_tensor.find("t2") != load_map_tensor.end());
auto new_tensor_1 = load_map_tensor["t1"];
auto new_tensor_2 = load_map_tensor["t2"];
float* ptr_1 = tensor1.data<float>();
float* ptr_1_new = new_tensor_1->data<float>();
for (int64_t i = 0; i < tensor1.numel(); ++i) {
ASSERT_EQ(ptr_1[i], ptr_1_new[i]);
}
float* ptr_2 = tensor2.data<float>();
float* ptr_2_new = new_tensor_2->data<float>();
for (int64_t i = 0; i < tensor2.numel(); ++i) {
ASSERT_EQ(ptr_2[i], ptr_2_new[i]);
}
}
} // namespace framework
} // namespace paddle
...
@@ -22,7 +22,6 @@ set(PYBIND_DEPS
     analysis_predictor
     imperative_profiler
     imperative_flag
-    save_load_util
     dlpack_tensor
     device_context
     gloo_wrapper
...
...
@@ -55,7 +55,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/phi_utils.h"
 #include "paddle/fluid/framework/prune.h"
 #include "paddle/fluid/framework/reader.h"
-#include "paddle/fluid/framework/save_load_util.h"
 #include "paddle/fluid/framework/scope_pool.h"
 #include "paddle/fluid/framework/selected_rows_utils.h"
 #include "paddle/fluid/framework/tensor_util.h"
...
...
@@ -55,7 +55,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/phi_utils.h"
 #include "paddle/fluid/framework/prune.h"
 #include "paddle/fluid/framework/reader.h"
-#include "paddle/fluid/framework/save_load_util.h"
 #include "paddle/fluid/framework/scope_pool.h"
 #include "paddle/fluid/framework/selected_rows_utils.h"
 #include "paddle/fluid/framework/tensor_util.h"
...
...
@@ -56,7 +56,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/phi_utils.h"
 #include "paddle/fluid/framework/prune.h"
 #include "paddle/fluid/framework/reader.h"
-#include "paddle/fluid/framework/save_load_util.h"
 #include "paddle/fluid/framework/scope_pool.h"
 #include "paddle/fluid/framework/selected_rows_utils.h"
 #include "paddle/fluid/framework/tensor_util.h"
...
...
@@ -55,7 +55,6 @@ limitations under the License. */
 #include "paddle/fluid/framework/phi_utils.h"
 #include "paddle/fluid/framework/prune.h"
 #include "paddle/fluid/framework/reader.h"
-#include "paddle/fluid/framework/save_load_util.h"
 #include "paddle/fluid/framework/scope_pool.h"
 #include "paddle/fluid/framework/selected_rows_utils.h"
 #include "paddle/fluid/framework/tensor_util.h"
...
...
@@ -28,7 +28,6 @@ HIGH_PARALLEL_JOB_NEW = [
     'test_fc_gru_fuse_pass_cc',
     'device_worker_test',
     'test_custom_conj',
-    'save_load_util_test',
     'infer_io_utils_tester',
     'test_transpose_bf16_mkldnn_op',
     'test_container',
...
@@ -2024,7 +2023,6 @@ CPU_PARALLEL_JOB = [
     'save_quant2_model_resnet50',
     'save_quant2_model_gru',
     'save_quant2_model_ernie',
-    'save_load_util_test',
     'save_load_op_test',
     'save_load_combine_op_test',
     'rw_lock_test',
...