Unverified commit 797a7184, authored by Yi Wang, committed by GitHub

Unify Fluid code to Google C++ style (#9685)

Parent d00bd9eb
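The diff below is almost entirely mechanical: the commit adds per-directory `.clang-format` files (shown further down) that switch Fluid to the Google base style, so clang-format now packs function arguments up to the 80-column limit instead of breaking one per line, and indents access specifiers by one space. A representative before/after pair, lifted from the fit_a_line test in this diff:

  // Before: the call is split with one argument group per line.
  SetupTensor<float>(
      input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));

  // After: arguments are packed up to the 80-column limit (Google style).
  SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
                     static_cast<float>(10));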
@@ -41,8 +41,7 @@ bool IsPersistable(const framework::VarDesc* var) {
   return false;
 }
 
-void LoadPersistables(framework::Executor& executor,
-                      framework::Scope& scope,
+void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
                       const std::string& param_filename) {
@@ -108,10 +107,8 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor& executor,
 }
 
 std::unique_ptr<framework::ProgramDesc> Load(
-    framework::Executor& executor,
-    framework::Scope& scope,
-    const std::string& prog_filename,
-    const std::string& param_filename) {
+    framework::Executor& executor, framework::Scope& scope,
+    const std::string& prog_filename, const std::string& param_filename) {
   std::string model_filename = prog_filename;
   std::string program_desc_str;
   ReadBinaryFile(model_filename, program_desc_str);
...
@@ -24,8 +24,7 @@ limitations under the License. */
 namespace paddle {
 namespace inference {
 
-void LoadPersistables(framework::Executor& executor,
-                      framework::Scope& scope,
+void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
                       const std::string& param_filename);
...
@@ -30,8 +30,8 @@ TEST(inference, fit_a_line) {
   // The second dim of the input tensor should be 13
   // The input data should be >= 0
   int64_t batch_size = 10;
-  SetupTensor<float>(
-      input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
+  SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
+                     static_cast<float>(10));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
...
@@ -35,10 +35,8 @@ TEST(inference, image_classification) {
   paddle::framework::LoDTensor input;
   // Use normalized image pixels as input data,
   // which should be in the range [0.0, 1.0].
-  SetupTensor<float>(input,
-                     {FLAGS_batch_size, 3, 32, 32},
-                     static_cast<float>(0),
-                     static_cast<float>(1));
+  SetupTensor<float>(input, {FLAGS_batch_size, 3, 32, 32},
+                     static_cast<float>(0), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
@@ -48,8 +46,8 @@ TEST(inference, image_classification) {
   // Run inference on CPU
   LOG(INFO) << "--- CPU Runs: ---";
-  TestInference<paddle::platform::CPUPlace>(
-      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat);
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            FLAGS_repeat);
   LOG(INFO) << output1.dims();
 
 #ifdef PADDLE_WITH_CUDA
@@ -59,8 +57,8 @@ TEST(inference, image_classification) {
   // Run inference on CUDA GPU
   LOG(INFO) << "--- GPU Runs: ---";
-  TestInference<paddle::platform::CUDAPlace>(
-      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat);
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
+                                             FLAGS_repeat);
   LOG(INFO) << output2.dims();
 
   CheckError<float>(output1, output2);
...
@@ -36,37 +36,21 @@ TEST(inference, label_semantic_roles) {
   int64_t predicate_dict_len = 3162;
   int64_t mark_dict_len = 2;
 
-  SetupLoDTensor(word,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(word, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(predicate,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(predicate, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(predicate_dict_len - 1));
-  SetupLoDTensor(ctx_n2,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_n2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_n1,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_n1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_0,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_0, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p1,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_p1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p2,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_p2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(mark,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(mark, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(mark_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
...
@@ -35,10 +35,8 @@ TEST(inference, recognize_digits) {
   paddle::framework::LoDTensor input;
   // Use normalized image pixels as input data,
   // which should be in the range [-1.0, 1.0].
-  SetupTensor<float>(input,
-                     {FLAGS_batch_size, 1, 28, 28},
-                     static_cast<float>(-1),
-                     static_cast<float>(1));
+  SetupTensor<float>(input, {FLAGS_batch_size, 1, 28, 28},
+                     static_cast<float>(-1), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
@@ -49,8 +47,8 @@ TEST(inference, recognize_digits) {
   // Run inference on CPU
   LOG(INFO) << "--- CPU Runs: is_combined=" << is_combined << " ---";
-  TestInference<paddle::platform::CPUPlace>(
-      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined);
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            FLAGS_repeat, is_combined);
   LOG(INFO) << output1.dims();
 
 #ifdef PADDLE_WITH_CUDA
@@ -60,8 +58,8 @@ TEST(inference, recognize_digits) {
   // Run inference on CUDA GPU
   LOG(INFO) << "--- GPU Runs: is_combined=" << is_combined << " ---";
-  TestInference<paddle::platform::CUDAPlace>(
-      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat, is_combined);
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
+                                             FLAGS_repeat, is_combined);
   LOG(INFO) << output2.dims();
 
   CheckError<float>(output1, output2);
...
@@ -32,10 +32,10 @@ TEST(inference, rnn_encoder_decoder) {
   paddle::framework::LoDTensor word_data, trg_word;
   paddle::framework::LoD lod{{0, 4, 10}};
-  SetupLoDTensor(
-      word_data, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(
-      trg_word, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
+  SetupLoDTensor(word_data, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(1));
+  SetupLoDTensor(trg_word, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&word_data);
...
@@ -33,9 +33,7 @@ TEST(inference, understand_sentiment) {
   paddle::framework::LoD lod{{0, 4, 10}};
   int64_t word_dict_len = 5147;
 
-  SetupLoDTensor(words,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(words, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
...
@@ -19,9 +19,7 @@ limitations under the License. */
 template <typename T>
 void SetupTensor(paddle::framework::LoDTensor& input,
-                 paddle::framework::DDim dims,
-                 T lower,
-                 T upper) {
+                 paddle::framework::DDim dims, T lower, T upper) {
   srand(time(0));
   T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
   for (int i = 0; i < input.numel(); ++i) {
@@ -33,8 +31,7 @@ void SetupTensor(paddle::framework::LoDTensor& input,
 template <typename T>
 void SetupTensor(paddle::framework::LoDTensor& input,
-                 paddle::framework::DDim dims,
-                 std::vector<T>& data) {
+                 paddle::framework::DDim dims, std::vector<T>& data) {
   CHECK_EQ(paddle::framework::product(dims), static_cast<int64_t>(data.size()));
   T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
   memcpy(input_ptr, data.data(), input.numel() * sizeof(T));
@@ -42,9 +39,7 @@ void SetupTensor(paddle::framework::LoDTensor& input,
 template <typename T>
 void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::LoD& lod,
-                    T lower,
-                    T upper) {
+                    paddle::framework::LoD& lod, T lower, T upper) {
   input.set_lod(lod);
   int dim = lod[0][lod[0].size() - 1];
   SetupTensor<T>(input, {dim, 1}, lower, upper);
@@ -52,8 +47,7 @@ void SetupLoDTensor(paddle::framework::LoDTensor& input,
 template <typename T>
 void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::DDim dims,
-                    paddle::framework::LoD lod,
+                    paddle::framework::DDim dims, paddle::framework::LoD lod,
                     std::vector<T>& data) {
   const size_t level = lod.size() - 1;
   CHECK_EQ(dims[0], static_cast<int64_t>((lod[level]).back()));
@@ -92,8 +86,7 @@ template <typename Place>
 void TestInference(const std::string& dirname,
                    const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
                    std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
-                   const int repeat = 1,
-                   const bool is_combined = false) {
+                   const int repeat = 1, const bool is_combined = false) {
   // 1. Define place, executor, scope
   auto place = Place();
   auto executor = paddle::framework::Executor(place);
@@ -132,11 +125,9 @@ void TestInference(const std::string& dirname,
     // `fluid.io.save_inference_model`.
     std::string prog_filename = "__model_combined__";
     std::string param_filename = "__params_combined__";
-    inference_program =
-        paddle::inference::Load(executor,
-                                *scope,
-                                dirname + "/" + prog_filename,
-                                dirname + "/" + param_filename);
+    inference_program = paddle::inference::Load(
+        executor, *scope, dirname + "/" + prog_filename,
+        dirname + "/" + param_filename);
   } else {
     // Parameters are saved in separate files sited in the specified
     // `dirname`.
...
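The helper hunks above all follow the same calling pattern, repeated in every test in this diff. A minimal usage sketch for orientation (the shape, value range, and model directory are placeholders, not values from the commit):

  // Hypothetical values for illustration only.
  int64_t batch_size = 10;
  std::string dirname = "/path/to/inference/model";  // placeholder path

  paddle::framework::LoDTensor input, output;
  // Fill `input` with random floats in [0, 10) of shape {batch_size, 13}.
  SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
                     static_cast<float>(10));

  std::vector<paddle::framework::LoDTensor*> cpu_feeds{&input};
  std::vector<paddle::framework::LoDTensor*> cpu_fetchs{&output};

  // Loads the program from `dirname`, runs it once on CPU, and copies the
  // fetched results into `output`.
  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs,
                                            1 /* repeat */);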
---
Language: Cpp
BasedOnStyle: Google
Standard: Cpp11
...
---
Language: Cpp
BasedOnStyle: Google
Standard: Cpp11
...
---
Language: Cpp
BasedOnStyle: Google
Standard: Cpp11
...
---
Language: Cpp
BasedOnStyle: Google
Standard: Cpp11
...
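The four identical blocks above are the contents of new per-directory `.clang-format` files: each simply inherits the Google base style, pinned to C++11, with no local overrides. One Google-style default worth noting, since it accounts for the `public:`/`private:` churn in the recordio headers below, is `AccessModifierOffset: -1`: combined with the two-space member indent, access specifiers end up indented by a single space. Sketched on a stripped-down class (not a verbatim excerpt from the diff):

  class Chunk {
   public:  // one space: AccessModifierOffset -1 from the 2-space indent
    Chunk() : num_bytes_(0) {}

   private:
    size_t num_bytes_;  // sum of record lengths in bytes
  };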
@@ -58,8 +58,8 @@ static void ReadStreamByBuf(std::istream& in, size_t limit, Callback callback) {
  * Copy stream in to another stream
  */
 static void PipeStream(std::istream& in, std::ostream& os) {
-  ReadStreamByBuf(
-      in, 0, [&os](const char* buf, size_t len) { os.write(buf, len); });
+  ReadStreamByBuf(in, 0,
+                  [&os](const char* buf, size_t len) { os.write(buf, len); });
 }
 
 /**
@@ -68,8 +68,8 @@ static void PipeStream(std::istream& in, std::ostream& os) {
 static uint32_t Crc32Stream(std::istream& in, size_t limit = 0) {
   uint32_t crc = static_cast<uint32_t>(crc32(0, nullptr, 0));
   ReadStreamByBuf(in, limit, [&crc](const char* buf, size_t len) {
-    crc = static_cast<uint32_t>(crc32(
-        crc, reinterpret_cast<const Bytef*>(buf), static_cast<uInt>(len)));
+    crc = static_cast<uint32_t>(crc32(crc, reinterpret_cast<const Bytef*>(buf),
+                                      static_cast<uInt>(len)));
   });
   return crc;
 }
...
@@ -24,7 +24,7 @@ namespace recordio {
 // A Chunk contains the Header and optionally compressed records.
 class Chunk {
-public:
+ public:
   Chunk() : num_bytes_(0) {}
   void Add(const std::string& buf) {
     num_bytes_ += buf.size();
@@ -46,7 +46,7 @@ public:
   bool Empty() const { return records_.empty(); }
 
-private:
+ private:
   std::vector<std::string> records_;
   // sum of record lengths in bytes.
   size_t num_bytes_;
...
@@ -37,7 +37,7 @@ enum class Compressor : uint32_t {
 // Header is the metadata of Chunk
 class Header {
-public:
+ public:
   Header();
   Header(uint32_t num, uint32_t sum, Compressor ct, uint32_t cs);
@@ -51,7 +51,7 @@ public:
   Compressor CompressType() const { return compressor_; }
   uint32_t CompressSize() const { return compress_size_; }
 
-private:
+ private:
   uint32_t num_records_;
   uint32_t checksum_;
   Compressor compressor_;
...
@@ -21,7 +21,7 @@ namespace paddle {
 namespace recordio {
 
 class Scanner {
-public:
+ public:
   explicit Scanner(std::unique_ptr<std::istream>&& stream);
   explicit Scanner(const std::string& filename);
@@ -32,7 +32,7 @@ public:
   bool HasNext() const;
 
-private:
+ private:
   std::unique_ptr<std::istream> stream_;
   Chunk cur_chunk_;
   size_t offset_;
...
@@ -18,9 +18,8 @@ namespace paddle {
 namespace recordio {
 
 class Writer {
-public:
-  Writer(std::ostream* sout,
-         Compressor compressor,
+ public:
+  Writer(std::ostream* sout, Compressor compressor,
          size_t max_num_records_in_chunk = 1000)
       : stream_(*sout),
         max_num_records_in_chunk_(max_num_records_in_chunk),
@@ -32,7 +31,7 @@ public:
   ~Writer();
 
-private:
+ private:
   std::ostream& stream_;
   size_t max_num_records_in_chunk_;
   Chunk cur_chunk_;
...
../framework/.clang-format
\ No newline at end of file