From 5bb7d59e3aefc77ca8c8457d35082a66beafce75 Mon Sep 17 00:00:00 2001
From: Yi Wang
Date: Sat, 7 Apr 2018 00:14:10 -0700
Subject: [PATCH] Fix cpplint errors with paddle/fluid/inference (#9702)

* Correct inference

* Update

* Update
---
 .../tests/book/test_inference_fit_a_line.cc    |  4 +-
 .../test_inference_image_classification.cc     |  4 +-
 .../test_inference_label_semantic_roles.cc     | 18 +++----
 .../book/test_inference_recognize_digits.cc    |  4 +-
 .../book/test_inference_recommender_system.cc  | 16 +++---
 .../test_inference_rnn_encoder_decoder.cc      |  6 +--
 .../test_inference_understand_sentiment.cc     |  4 +-
 .../tests/book/test_inference_word2vec.cc      | 10 ++--
 paddle/fluid/inference/tests/test_helper.h     | 50 +++++++++++--------
 9 files changed, 61 insertions(+), 55 deletions(-)

diff --git a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
index 8d8365a8396..3e77dc166c3 100644
--- a/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_fit_a_line.cc
@@ -9,8 +9,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "gflags/gflags.h"
+#include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -30,7 +30,7 @@ TEST(inference, fit_a_line) {
   // The second dim of the input tensor should be 13
   // The input data should be >= 0
   int64_t batch_size = 10;
-  SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0),
+  SetupTensor<float>(&input, {batch_size, 13}, static_cast<float>(0),
                      static_cast<float>(10));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
diff --git a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc
index 954ca7a3e38..a6b6c3f828f 100644
--- a/paddle/fluid/inference/tests/book/test_inference_image_classification.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_image_classification.cc
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "gflags/gflags.h"
+#include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -35,7 +35,7 @@ TEST(inference, image_classification) {
   paddle::framework::LoDTensor input;
   // Use normilized image pixels as input data,
   // which should be in the range [0.0, 1.0].
-  SetupTensor<float>(input, {FLAGS_batch_size, 3, 32, 32},
+  SetupTensor<float>(&input, {FLAGS_batch_size, 3, 32, 32},
                      static_cast<float>(0), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
diff --git a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
index 31100494ffc..84bb855fea5 100644
--- a/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_label_semantic_roles.cc
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "gflags/gflags.h"
+#include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -36,21 +36,21 @@ TEST(inference, label_semantic_roles) {
   int64_t predicate_dict_len = 3162;
   int64_t mark_dict_len = 2;
 
-  SetupLoDTensor(word, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&word, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(predicate, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&predicate, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(predicate_dict_len - 1));
-  SetupLoDTensor(ctx_n2, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&ctx_n2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_n1, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&ctx_n1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_0, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&ctx_0, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p1, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&ctx_p1, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(ctx_p2, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&ctx_p2, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
-  SetupLoDTensor(mark, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&mark, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(mark_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
diff --git a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc
index 82275d3ee11..f12828a2685 100644
--- a/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_recognize_digits.cc
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "gflags/gflags.h"
+#include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -35,7 +35,7 @@ TEST(inference, recognize_digits) {
   paddle::framework::LoDTensor input;
   // Use normilized image pixels as input data,
   // which should be in the range [-1.0, 1.0].
-  SetupTensor<float>(input, {FLAGS_batch_size, 1, 28, 28},
+  SetupTensor<float>(&input, {FLAGS_batch_size, 1, 28, 28},
                      static_cast<float>(-1), static_cast<float>(1));
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&input);
diff --git a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc
index b42a33c9a90..70aa6b194d4 100644
--- a/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_recommender_system.cc
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "gflags/gflags.h"
+#include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -36,25 +36,25 @@ TEST(inference, recommender_system) {
 
   // Use the first data from paddle.dataset.movielens.test() as input
   std::vector<int64_t> user_id_data = {1};
-  SetupTensor<int64_t>(user_id, {batch_size, 1}, user_id_data);
+  SetupTensor<int64_t>(&user_id, {batch_size, 1}, user_id_data);
 
   std::vector<int64_t> gender_id_data = {1};
-  SetupTensor<int64_t>(gender_id, {batch_size, 1}, gender_id_data);
+  SetupTensor<int64_t>(&gender_id, {batch_size, 1}, gender_id_data);
 
   std::vector<int64_t> age_id_data = {0};
-  SetupTensor<int64_t>(age_id, {batch_size, 1}, age_id_data);
+  SetupTensor<int64_t>(&age_id, {batch_size, 1}, age_id_data);
 
   std::vector<int64_t> job_id_data = {10};
-  SetupTensor<int64_t>(job_id, {batch_size, 1}, job_id_data);
+  SetupTensor<int64_t>(&job_id, {batch_size, 1}, job_id_data);
 
   std::vector<int64_t> movie_id_data = {783};
-  SetupTensor<int64_t>(movie_id, {batch_size, 1}, movie_id_data);
+  SetupTensor<int64_t>(&movie_id, {batch_size, 1}, movie_id_data);
 
   std::vector<int64_t> category_id_data = {10, 8, 9};
-  SetupLoDTensor<int64_t>(category_id, {3, 1}, {{0, 3}}, category_id_data);
+  SetupLoDTensor<int64_t>(&category_id, {3, 1}, {{0, 3}}, category_id_data);
 
   std::vector<int64_t> movie_title_data = {1069, 4140, 2923, 710, 988};
-  SetupLoDTensor<int64_t>(movie_title, {5, 1}, {{0, 5}}, movie_title_data);
+  SetupLoDTensor<int64_t>(&movie_title, {5, 1}, {{0, 5}}, movie_title_data);
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&user_id);
diff --git a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc
index ea2d5ee092a..e15c3f59acb 100644
--- a/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_rnn_encoder_decoder.cc
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "gflags/gflags.h"
+#include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -32,9 +32,9 @@ TEST(inference, rnn_encoder_decoder) {
   paddle::framework::LoDTensor word_data, trg_word;
   paddle::framework::LoD lod{{0, 4, 10}};
 
-  SetupLoDTensor(word_data, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&word_data, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(1));
-  SetupLoDTensor(trg_word, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&trg_word, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
diff --git a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
index 21ffd8d3682..0dbb6a30405 100644
--- a/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_understand_sentiment.cc
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "gflags/gflags.h"
+#include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -33,7 +33,7 @@ TEST(inference, understand_sentiment) {
   paddle::framework::LoD lod{{0, 4, 10}};
   int64_t word_dict_len = 5147;
 
-  SetupLoDTensor(words, lod, static_cast<int64_t>(0),
+  SetupLoDTensor(&words, lod, static_cast<int64_t>(0),
                  static_cast<int64_t>(word_dict_len - 1));
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
diff --git a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc
index 1481760c529..c9328eb21b4 100644
--- a/paddle/fluid/inference/tests/book/test_inference_word2vec.cc
+++ b/paddle/fluid/inference/tests/book/test_inference_word2vec.cc
@@ -12,8 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
 
-#include <gtest/gtest.h>
 #include "gflags/gflags.h"
+#include "gtest/gtest.h"
 #include "paddle/fluid/inference/tests/test_helper.h"
 
 DEFINE_string(dirname, "", "Directory of the inference model.");
@@ -33,10 +33,10 @@ TEST(inference, word2vec) {
   paddle::framework::LoD lod{{0, 1}};
   int64_t dict_size = 2073;  // The size of dictionary
 
-  SetupLoDTensor(first_word, lod, static_cast<int64_t>(0), dict_size - 1);
-  SetupLoDTensor(second_word, lod, static_cast<int64_t>(0), dict_size - 1);
-  SetupLoDTensor(third_word, lod, static_cast<int64_t>(0), dict_size - 1);
-  SetupLoDTensor(fourth_word, lod, static_cast<int64_t>(0), dict_size - 1);
+  SetupLoDTensor(&first_word, lod, static_cast<int64_t>(0), dict_size - 1);
+  SetupLoDTensor(&second_word, lod, static_cast<int64_t>(0), dict_size - 1);
+  SetupLoDTensor(&third_word, lod, static_cast<int64_t>(0), dict_size - 1);
+  SetupLoDTensor(&fourth_word, lod, static_cast<int64_t>(0), dict_size - 1);
 
   std::vector<paddle::framework::LoDTensor*> cpu_feeds;
   cpu_feeds.push_back(&first_word);
diff --git a/paddle/fluid/inference/tests/test_helper.h b/paddle/fluid/inference/tests/test_helper.h
index d8ffedf6726..5118e66f1e7 100644
--- a/paddle/fluid/inference/tests/test_helper.h
+++ b/paddle/fluid/inference/tests/test_helper.h
@@ -11,53 +11,59 @@ distributed under the License is distributed on an "AS IS" BASIS,
 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 See the License for the specific language governing permissions and
 limitations under the License. */
+#pragma once
+
+#include <map>
+#include <random>
+#include <string>
+#include <vector>
 
-#include <time.h>
 #include "paddle/fluid/framework/lod_tensor.h"
 #include "paddle/fluid/inference/io.h"
 #include "paddle/fluid/platform/profiler.h"
 
 template <typename T>
-void SetupTensor(paddle::framework::LoDTensor& input,
+void SetupTensor(paddle::framework::LoDTensor* input,
                  paddle::framework::DDim dims, T lower, T upper) {
-  srand(time(0));
-  T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
-  for (int i = 0; i < input.numel(); ++i) {
-    input_ptr[i] =
-        (static_cast<T>(rand()) / static_cast<T>(RAND_MAX)) * (upper - lower) +
-        lower;
+  std::mt19937 rng(100);  // An arbitrarily chosen but fixed seed.
+  std::uniform_real_distribution<double> uniform_dist(0, 1);
+
+  T* input_ptr = input->mutable_data<T>(dims, paddle::platform::CPUPlace());
+  for (int i = 0; i < input->numel(); ++i) {
+    input_ptr[i] = static_cast<T>(uniform_dist(rng) * (upper - lower) + lower);
   }
 }
 
 template <typename T>
-void SetupTensor(paddle::framework::LoDTensor& input,
-                 paddle::framework::DDim dims, std::vector<T>& data) {
+void SetupTensor(paddle::framework::LoDTensor* input,
+                 paddle::framework::DDim dims, const std::vector<T>& data) {
   CHECK_EQ(paddle::framework::product(dims), static_cast<int64_t>(data.size()));
-  T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
-  memcpy(input_ptr, data.data(), input.numel() * sizeof(T));
+  T* input_ptr = input->mutable_data<T>(dims, paddle::platform::CPUPlace());
+  memcpy(input_ptr, data.data(), input->numel() * sizeof(T));
 }
 
 template <typename T>
-void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::LoD& lod, T lower, T upper) {
-  input.set_lod(lod);
+void SetupLoDTensor(paddle::framework::LoDTensor* input,
+                    const paddle::framework::LoD& lod, T lower, T upper) {
+  input->set_lod(lod);
   int dim = lod[0][lod[0].size() - 1];
   SetupTensor<T>(input, {dim, 1}, lower, upper);
 }
 
 template <typename T>
-void SetupLoDTensor(paddle::framework::LoDTensor& input,
-                    paddle::framework::DDim dims, paddle::framework::LoD lod,
-                    std::vector<T>& data) {
+void SetupLoDTensor(paddle::framework::LoDTensor* input,
+                    paddle::framework::DDim dims,
+                    const paddle::framework::LoD lod,
+                    const std::vector<T>& data) {
   const size_t level = lod.size() - 1;
   CHECK_EQ(dims[0], static_cast<int64_t>((lod[level]).back()));
-  input.set_lod(lod);
+  input->set_lod(lod);
   SetupTensor<T>(input, dims, data);
 }
 
 template <typename T>
-void CheckError(paddle::framework::LoDTensor& output1,
-                paddle::framework::LoDTensor& output2) {
+void CheckError(const paddle::framework::LoDTensor& output1,
+                const paddle::framework::LoDTensor& output2) {
   // Check lod information
   EXPECT_EQ(output1.lod(), output2.lod());
 
@@ -85,7 +91,7 @@ void CheckError(paddle::framework::LoDTensor& output1,
 template <typename Place>
 void TestInference(const std::string& dirname,
                    const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
-                   std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
+                   const std::vector<paddle::framework::LoDTensor*>& cpu_fetchs,
                    const int repeat = 1, const bool is_combined = false) {
   // 1. Define place, executor, scope
   auto place = Place();
-- 
GitLab
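
Note on the helper API after this patch (not part of the diff itself): the tests now hand LoDTensor feeds to the helpers by pointer, and SetupTensor draws values from a seeded std::mt19937 rather than srand/rand, so repeated runs fill the same inputs. Below is a minimal sketch of a caller against the new signatures; the function name ExampleFeed is hypothetical, and the shape and value range simply mirror the fit_a_line test above.

#include <vector>

#include "paddle/fluid/framework/lod_tensor.h"
#include "paddle/fluid/inference/tests/test_helper.h"

// Illustrative sketch only: mirrors how the updated tests call the
// pointer-based helpers after this patch.
void ExampleFeed() {
  paddle::framework::LoDTensor input;
  // Fills `input` with uniform random floats in [0, 10); note the pointer.
  SetupTensor<float>(&input, {10, 13}, static_cast<float>(0),
                     static_cast<float>(10));

  std::vector<paddle::framework::LoDTensor*> cpu_feeds;
  cpu_feeds.push_back(&input);  // Feeds are still passed around as raw pointers.
}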