Unverified commit 797a7184, authored by Yi Wang, committed by GitHub

Unify Fluid code to Google C++ style (#9685)

Parent d00bd9eb
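The change below is mechanical. The old Fluid formatting put one function argument per line; Google C++ style bin-packs arguments up to the 80-column limit and indents class access specifiers by one space. A reformat like this would typically be produced by something like `clang-format -i -style=file <file>` against the .clang-format configs added near the end of this diff (the exact invocation is an assumption, not recorded in the commit). A representative before/after, taken from the LoadPersistables hunks below:

// Before: old Fluid style, one parameter per line.
void LoadPersistables(framework::Executor& executor,
                      framework::Scope& scope,
                      const framework::ProgramDesc& main_program,
                      const std::string& dirname,
                      const std::string& param_filename);

// After: Google style packs parameters up to the column limit.
void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                      const framework::ProgramDesc& main_program,
                      const std::string& dirname,
                      const std::string& param_filename);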
@@ -41,8 +41,7 @@ bool IsPersistable(const framework::VarDesc* var) {
   return false;
 }
 
-void LoadPersistables(framework::Executor& executor,
-                      framework::Scope& scope,
+void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
                       const std::string& param_filename) {
@@ -108,10 +107,8 @@ std::unique_ptr<framework::ProgramDesc> Load(framework::Executor& executor,
 }
 
 std::unique_ptr<framework::ProgramDesc> Load(
-    framework::Executor& executor,
-    framework::Scope& scope,
-    const std::string& prog_filename,
-    const std::string& param_filename) {
+    framework::Executor& executor, framework::Scope& scope,
+    const std::string& prog_filename, const std::string& param_filename) {
   std::string model_filename = prog_filename;
   std::string program_desc_str;
   ReadBinaryFile(model_filename, program_desc_str);
@@ -24,8 +24,7 @@ limitations under the License. */
 namespace paddle {
 namespace inference {
 
-void LoadPersistables(framework::Executor& executor,
-                      framework::Scope& scope,
+void LoadPersistables(framework::Executor& executor, framework::Scope& scope,
                       const framework::ProgramDesc& main_program,
                       const std::string& dirname,
                       const std::string& param_filename);
@@ -30,8 +30,8 @@ TEST(inference, fit_a_line) {
   // The second dim of the input tensor should be 13
   // The input data should be >= 0
   int64_t batch_size = 10;
 
-  SetupTensor<float>(
-      input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
+  SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
+                     static_cast<float>(10));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
@@ -35,10 +35,8 @@ TEST(inference, image_classification) {
   paddle::framework::LoDTensor input;
   // Use normalized image pixels as input data,
   // which should be in the range [0.0, 1.0].
-  SetupTensor<float>(input,
-                     {FLAGS_batch_size, 3, 32, 32},
-                     static_cast<float>(0),
-                     static_cast<float>(1));
+  SetupTensor<float>(input, {FLAGS_batch_size, 3, 32, 32},
+                     static_cast<float>(0), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
@@ -48,8 +46,8 @@ TEST(inference, image_classification) {
 
   // Run inference on CPU
   LOG(INFO) << "--- CPU Runs: ---";
-  TestInference<paddle::platform::CPUPlace>(
-      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat);
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            FLAGS_repeat);
   LOG(INFO) << output1.dims();
 
 #ifdef PADDLE_WITH_CUDA
@@ -59,8 +57,8 @@ TEST(inference, image_classification) {
 
   // Run inference on CUDA GPU
   LOG(INFO) << "--- GPU Runs: ---";
-  TestInference<paddle::platform::CUDAPlace>(
-      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat);
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
+                                             FLAGS_repeat);
   LOG(INFO) << output2.dims();
 
   CheckError<float>(output1, output2);
@@ -36,37 +36,21 @@ TEST(inference, label_semantic_roles) {
   int64_t predicate_dict_len = 3162;
   int64_t mark_dict_len = 2;
 
-  SetupLoDTensor(word,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(word, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(predicate,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(predicate, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(predicate_dict_len - 1));
-  SetupLoDTensor(ctx_n2,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_n2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_n1,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_n1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_0,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_0, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p1,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_p1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p2,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(ctx_p2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(mark,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(mark, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(mark_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
@@ -35,10 +35,8 @@ TEST(inference, recognize_digits) {
   paddle::framework::LoDTensor input;
   // Use normalized image pixels as input data,
   // which should be in the range [-1.0, 1.0].
-  SetupTensor<float>(input,
-                     {FLAGS_batch_size, 1, 28, 28},
-                     static_cast<float>(-1),
-                     static_cast<float>(1));
+  SetupTensor<float>(input, {FLAGS_batch_size, 1, 28, 28},
+                     static_cast<float>(-1), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
@@ -49,8 +47,8 @@ TEST(inference, recognize_digits) {
 
   // Run inference on CPU
   LOG(INFO) << "--- CPU Runs: is_combined=" << is_combined << " ---";
-  TestInference<paddle::platform::CPUPlace>(
-      dirname, cpu_feeds, cpu_fetchs1, FLAGS_repeat, is_combined);
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1,
+                                            FLAGS_repeat, is_combined);
   LOG(INFO) << output1.dims();
 
 #ifdef PADDLE_WITH_CUDA
@@ -60,8 +58,8 @@ TEST(inference, recognize_digits) {
 
   // Run inference on CUDA GPU
   LOG(INFO) << "--- GPU Runs: is_combined=" << is_combined << " ---";
-  TestInference<paddle::platform::CUDAPlace>(
-      dirname, cpu_feeds, cpu_fetchs2, FLAGS_repeat, is_combined);
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2,
+                                             FLAGS_repeat, is_combined);
   LOG(INFO) << output2.dims();
 
   CheckError<float>(output1, output2);
@@ -32,10 +32,10 @@ TEST(inference, rnn_encoder_decoder) {
   paddle::framework::LoDTensor word_data, trg_word;
   paddle::framework::LoD lod{{0, 4, 10}};
 
-  SetupLoDTensor(
-      word_data, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
-  SetupLoDTensor(
-      trg_word, lod, static_cast<int64_t>(0), static_cast<int64_t>(1));
+  SetupLoDTensor(word_data, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(1));
+  SetupLoDTensor(trg_word, lod, static_cast<int64_t>(0),
+                 static_cast<int64_t>(1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&word_data);
@@ -33,9 +33,7 @@ TEST(inference, understand_sentiment) {
   paddle::framework::LoD lod{{0, 4, 10}};
   int64_t word_dict_len = 5147;
 
-  SetupLoDTensor(words,
-                 lod,
-                 static_cast<int64_t>(0),
+  SetupLoDTensor(words, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
@@ -19,9 +19,7 @@ limitations under the License. */
 
 template <typename T>
 void SetupTensor(paddle::framework::LoDTensor& input,
-                 paddle::framework::DDim dims,
-                 T lower,
-                 T upper) {
+                 paddle::framework::DDim dims, T lower, T upper) {
   srand(time(0));
   T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
   for (int i = 0; i < input.numel(); ++i) {
@@ -33,8 +31,7 @@ void SetupTensor(paddle::framework::LoDTensor& input,
 
 template <typename T>
 void SetupTensor(paddle::framework::LoDTensor& input,
-                 paddle::framework::DDim dims,
-                 std::vector<T>& data) {
+                 paddle::framework::DDim dims, std::vector<T>& data) {
   CHECK_EQ(paddle::framework::product(dims), static_cast<int64_t>(data.size()));
   T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
   memcpy(input_ptr, data.data(), input.numel() * sizeof(T));
@@ -42,9 +39,7 @@ void SetupTensor(paddle::framework::LoDTensor& input,
 
 template <typename T>
 void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::LoD& lod,
-                    T lower,
-                    T upper) {
+                    paddle::framework::LoD& lod, T lower, T upper) {
   input.set_lod(lod);
   int dim = lod[0][lod[0].size() - 1];
   SetupTensor<T>(input, {dim, 1}, lower, upper);
@@ -52,8 +47,7 @@ void SetupLoDTensor(paddle::framework::LoDTensor& input,
 
 template <typename T>
 void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::DDim dims,
-                    paddle::framework::LoD lod,
+                    paddle::framework::DDim dims, paddle::framework::LoD lod,
                     std::vector<T>& data) {
   const size_t level = lod.size() - 1;
   CHECK_EQ(dims[0], static_cast<int64_t>((lod[level]).back()));
@@ -92,8 +86,7 @@ template <typename Place>
 void TestInference(const std::string& dirname,
                    const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
                    std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
-                   const int repeat = 1,
-                   const bool is_combined = false) {
+                   const int repeat = 1, const bool is_combined = false) {
   // 1. Define place, executor, scope
   auto place = Place();
   auto executor = paddle::framework::Executor(place);
@@ -132,10 +125,8 @@ void TestInference(const std::string& dirname,
     // `fluid.io.save_inference_model`.
     std::string prog_filename = "__model_combined__";
     std::string param_filename = "__params_combined__";
-    inference_program =
-        paddle::inference::Load(executor,
-                                *scope,
-                                dirname + "/" + prog_filename,
+    inference_program = paddle::inference::Load(
+        executor, *scope, dirname + "/" + prog_filename,
         dirname + "/" + param_filename);
   } else {
     // Parameters are saved in separate files sited in the specified
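For context, here is a minimal sketch of calling the combined-file Load overload exercised above. It is assembled only from signatures visible in this diff; the model directory is a hypothetical placeholder, and the executor/scope setup is assumed to mirror what TestInference does:

// Sketch only: assumes Fluid inference headers and a model saved by
// `fluid.io.save_inference_model` with the combined filenames used above.
paddle::platform::CPUPlace place;
auto executor = paddle::framework::Executor(place);
auto* scope = new paddle::framework::Scope();  // assumed setup, as in TestInference
std::string dirname = "/path/to/model";        // hypothetical directory
auto inference_program = paddle::inference::Load(
    executor, *scope, dirname + "/__model_combined__",
    dirname + "/__params_combined__");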
+---
+Language: Cpp
+BasedOnStyle: Google
+Standard: Cpp11
+...

+---
+Language: Cpp
+BasedOnStyle: Google
+Standard: Cpp11
+...

+---
+Language: Cpp
+BasedOnStyle: Google
+Standard: Cpp11
+...

+---
+Language: Cpp
+BasedOnStyle: Google
+Standard: Cpp11
+...
@@ -58,8 +58,8 @@ static void ReadStreamByBuf(std::istream& in, size_t limit, Callback callback) {
  * Copy stream in to another stream
  */
 static void PipeStream(std::istream& in, std::ostream& os) {
-  ReadStreamByBuf(
-      in, 0, [&os](const char* buf, size_t len) { os.write(buf, len); });
+  ReadStreamByBuf(in, 0,
+                  [&os](const char* buf, size_t len) { os.write(buf, len); });
 }
 
 /**
@@ -68,8 +68,8 @@ static void PipeStream(std::istream& in, std::ostream& os) {
 static uint32_t Crc32Stream(std::istream& in, size_t limit = 0) {
   uint32_t crc = static_cast<uint32_t>(crc32(0, nullptr, 0));
   ReadStreamByBuf(in, limit, [&crc](const char* buf, size_t len) {
-    crc = static_cast<uint32_t>(crc32(
-        crc, reinterpret_cast<const Bytef*>(buf), static_cast<uInt>(len)));
+    crc = static_cast<uint32_t>(crc32(crc, reinterpret_cast<const Bytef*>(buf),
+                                      static_cast<uInt>(len)));
   });
   return crc;
 }
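Crc32Stream above folds each buffered read into a running checksum through zlib's incremental crc32() API. A self-contained sketch of the same pattern, independent of the recordio helpers (assumes zlib is available; link with -lz):

#include <zlib.h>

#include <cstdio>
#include <cstring>

int main() {
  // crc32(0, nullptr, 0) yields the initial CRC seed, as in Crc32Stream.
  uLong crc = crc32(0L, nullptr, 0);
  const char* chunks[] = {"hello, ", "recordio"};
  for (const char* chunk : chunks) {
    // Fold each buffer into the running CRC, mirroring the lambda above.
    crc = crc32(crc, reinterpret_cast<const Bytef*>(chunk),
                static_cast<uInt>(std::strlen(chunk)));
  }
  std::printf("crc32 = %08lx\n", static_cast<unsigned long>(crc));
  return 0;
}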
@@ -24,7 +24,7 @@ namespace recordio {
 // A Chunk contains the Header and optionally compressed records.
 class Chunk {
-public:
+ public:
   Chunk() : num_bytes_(0) {}
   void Add(const std::string& buf) {
     num_bytes_ += buf.size();
@@ -46,7 +46,7 @@ public:
   bool Empty() const { return records_.empty(); }
 
-private:
+ private:
   std::vector<std::string> records_;
   // sum of record lengths in bytes.
   size_t num_bytes_;
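The recurring `public:`/`private:` change in these recordio headers is the other visible effect of the style switch: Google style (IndentWidth: 2 with AccessModifierOffset: -1) indents access specifiers by a single space rather than flush-left. A minimal illustration on a made-up class, not taken from the diff:

class Example {
 public:  // one space = IndentWidth (2) + AccessModifierOffset (-1)
  Example() = default;
  int value() const { return value_; }

 private:
  int value_ = 0;
};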
@@ -37,7 +37,7 @@ enum class Compressor : uint32_t {
 // Header is the metadata of Chunk
 class Header {
-public:
+ public:
   Header();
   Header(uint32_t num, uint32_t sum, Compressor ct, uint32_t cs);
@@ -51,7 +51,7 @@ public:
   Compressor CompressType() const { return compressor_; }
   uint32_t CompressSize() const { return compress_size_; }
 
-private:
+ private:
   uint32_t num_records_;
   uint32_t checksum_;
   Compressor compressor_;
@@ -21,7 +21,7 @@ namespace paddle {
 namespace recordio {
 
 class Scanner {
-public:
+ public:
   explicit Scanner(std::unique_ptr<std::istream>&& stream);
   explicit Scanner(const std::string& filename);
@@ -32,7 +32,7 @@ public:
   bool HasNext() const;
 
-private:
+ private:
   std::unique_ptr<std::istream> stream_;
   Chunk cur_chunk_;
   size_t offset_;
@@ -18,9 +18,8 @@ namespace paddle {
 namespace recordio {
 
 class Writer {
-public:
-  Writer(std::ostream* sout,
-         Compressor compressor,
+ public:
+  Writer(std::ostream* sout, Compressor compressor,
          size_t max_num_records_in_chunk = 1000)
       : stream_(*sout),
         max_num_records_in_chunk_(max_num_records_in_chunk),
@@ -32,7 +31,7 @@ public:
   ~Writer();
 
-private:
+ private:
   std::ostream& stream_;
   size_t max_num_records_in_chunk_;
   Chunk cur_chunk_;
+../framework/.clang-format
\ No newline at end of file