From 720994b40007c3d3fc5691d6a2f0e3257e31f88f Mon Sep 17 00:00:00 2001
From: kexinzhao
Date: Fri, 9 Feb 2018 02:06:33 -0800
Subject: [PATCH] Add inference example and unit-test for fit-a-line book
 chapter (#8208)

* initial commit

* minor fix

* remove redundancy

* address comments
---
 paddle/inference/tests/book/CMakeLists.txt   |  1 +
 .../tests/book/test_inference_fit_a_line.cc  | 57 +++++++++++++++++++
 .../v2/fluid/tests/book/test_fit_a_line.py   | 48 ++++++++++++++--
 3 files changed, 100 insertions(+), 6 deletions(-)
 create mode 100644 paddle/inference/tests/book/test_inference_fit_a_line.cc

diff --git a/paddle/inference/tests/book/CMakeLists.txt b/paddle/inference/tests/book/CMakeLists.txt
index 5c866eb1e2..5d065e53b2 100644
--- a/paddle/inference/tests/book/CMakeLists.txt
+++ b/paddle/inference/tests/book/CMakeLists.txt
@@ -24,6 +24,7 @@ function(inference_test TARGET_NAME)
   endforeach()
 endfunction(inference_test)
 
+inference_test(fit_a_line)
 inference_test(recognize_digits ARGS mlp)
 inference_test(image_classification ARGS vgg resnet)
 inference_test(label_semantic_roles)
diff --git a/paddle/inference/tests/book/test_inference_fit_a_line.cc b/paddle/inference/tests/book/test_inference_fit_a_line.cc
new file mode 100644
index 0000000000..201a2801cd
--- /dev/null
+++ b/paddle/inference/tests/book/test_inference_fit_a_line.cc
@@ -0,0 +1,57 @@
+/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve.
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+    http://www.apache.org/licenses/LICENSE-2.0
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. */
+
+#include <gtest/gtest.h>
+#include "gflags/gflags.h"
+#include "test_helper.h"
+
+DEFINE_string(dirname, "", "Directory of the inference model.");
+
+TEST(inference, fit_a_line) {
+  if (FLAGS_dirname.empty()) {
+    LOG(FATAL) << "Usage: ./example --dirname=path/to/your/model";
+  }
+
+  LOG(INFO) << "FLAGS_dirname: " << FLAGS_dirname << std::endl;
+  std::string dirname = FLAGS_dirname;
+
+  // 0. Call `paddle::framework::InitDevices()` to initialize all the devices.
+  // In unittests, this is done in paddle/testing/paddle_gtest_main.cc
+
+  paddle::framework::LoDTensor input;
+  // The second dim of the input tensor should be 13
+  // The input data should be >= 0
+  int64_t batch_size = 10;
+  SetupTensor<float>(
+      input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10));
+  std::vector<paddle::framework::LoDTensor*> cpu_feeds;
+  cpu_feeds.push_back(&input);
+
+  paddle::framework::LoDTensor output1;
+  std::vector<paddle::framework::LoDTensor*> cpu_fetchs1;
+  cpu_fetchs1.push_back(&output1);
+
+  // Run inference on CPU
+  TestInference<paddle::platform::CPUPlace>(dirname, cpu_feeds, cpu_fetchs1);
+  LOG(INFO) << output1.dims();
+
+#ifdef PADDLE_WITH_CUDA
+  paddle::framework::LoDTensor output2;
+  std::vector<paddle::framework::LoDTensor*> cpu_fetchs2;
+  cpu_fetchs2.push_back(&output2);
+
+  // Run inference on CUDA GPU
+  TestInference<paddle::platform::CUDAPlace>(dirname, cpu_feeds, cpu_fetchs2);
+  LOG(INFO) << output2.dims();
+
+  CheckError<float>(output1, output2);
+#endif
+}
diff --git a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
index 06860a2a46..b3332b4810 100644
--- a/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
+++ b/python/paddle/v2/fluid/tests/book/test_fit_a_line.py
@@ -15,15 +15,13 @@
 import paddle.v2 as paddle
 import paddle.v2.fluid as fluid
 import contextlib
+import numpy
 import unittest
 import math
 import sys
 
 
-def main(use_cuda):
-    if use_cuda and not fluid.core.is_compiled_with_cuda():
-        return
-
+def train(use_cuda, save_dirname):
     x = fluid.layers.data(name='x', shape=[13], dtype='float32')
     y_predict = fluid.layers.fc(input=x, size=1, act=None)
 
@@ -51,14 +49,15 @@ def main(use_cuda):
 
     PASS_NUM = 100
     for pass_id in range(PASS_NUM):
-        fluid.io.save_persistables(exe, "./fit_a_line.model/")
-        fluid.io.load_persistables(exe, "./fit_a_line.model/")
         for data in train_reader():
             avg_loss_value, = exe.run(fluid.default_main_program(),
                                       feed=feeder.feed(data),
                                       fetch_list=[avg_cost])
             print(avg_loss_value)
             if avg_loss_value[0] < 10.0:
+                if save_dirname is not None:
+                    fluid.io.save_inference_model(save_dirname, ['x'],
+                                                  [y_predict], exe)
                 return
             if math.isnan(float(avg_loss_value)):
                 sys.exit("got NaN loss, training failed.")
@@ -66,6 +65,43 @@ def main(use_cuda):
             avg_loss_value[0]))
 
 
+def infer(use_cuda, save_dirname=None):
+    if save_dirname is None:
+        return
+
+    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
+    exe = fluid.Executor(place)
+
+    # Use fluid.io.load_inference_model to obtain the inference program desc,
+    # the feed_target_names (the names of variables that will be fed
+    # data using feed operators), and the fetch_targets (variables that
+    # we want to obtain data from using fetch operators).
+    [inference_program, feed_target_names,
+     fetch_targets] = fluid.io.load_inference_model(save_dirname, exe)
+
+    # The input's dimension should be 2-D and the second dim is 13
+    # The input data should be >= 0
+    batch_size = 10
+    tensor_x = numpy.random.uniform(0, 10, [batch_size, 13]).astype("float32")
+    assert feed_target_names[0] == 'x'
+    results = exe.run(inference_program,
+                      feed={feed_target_names[0]: tensor_x},
+                      fetch_list=fetch_targets)
+    print("infer shape: ", results[0].shape)
+    print("infer results: ", results[0])
+
+
+def main(use_cuda):
+    if use_cuda and not fluid.core.is_compiled_with_cuda():
+        return
+
+    # Directory for saving the trained model
+    save_dirname = "fit_a_line.inference.model"
+
+    train(use_cuda, save_dirname)
+    infer(use_cuda, save_dirname)
+
+
 class TestFitALine(unittest.TestCase):
     def test_cpu(self):
         with self.program_scope_guard():
--
GitLab
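
Note on helpers: the new C++ test calls SetupTensor, TestInference, and CheckError from test_helper.h, which this patch does not modify. For readers following along, below is a minimal sketch of what SetupTensor and CheckError could look like; it is an illustrative assumption, not the actual contents of paddle/inference/tests/book/test_helper.h, and the include paths assume the pre-fluid directory layout of early 2018.

// Hypothetical sketch of two helpers used by test_inference_fit_a_line.cc.
// The real test_helper.h in the Paddle tree may differ.
#include <random>
#include "gtest/gtest.h"
#include "paddle/framework/lod_tensor.h"
#include "paddle/platform/place.h"

// Fill `input` with uniform random values in [lower, upper] on the CPU.
template <typename T>
void SetupTensor(paddle::framework::LoDTensor& input,
                 paddle::framework::DDim dims,
                 T lower,
                 T upper) {
  std::mt19937 rng(100);  // fixed seed keeps test inputs reproducible
  std::uniform_real_distribution<double> dist(0.0, 1.0);
  T* input_ptr = input.mutable_data<T>(dims, paddle::platform::CPUPlace());
  for (int i = 0; i < input.numel(); ++i) {
    input_ptr[i] = static_cast<T>(dist(rng) * (upper - lower) + lower);
  }
}

// Compare two output tensors element-wise within an absolute tolerance,
// e.g. the CPU result against the CUDA result.
template <typename T>
void CheckError(const paddle::framework::LoDTensor& output1,
                const paddle::framework::LoDTensor& output2) {
  EXPECT_EQ(output1.dims(), output2.dims());
  const T tolerance = static_cast<T>(1e-3);
  for (int i = 0; i < output1.numel(); ++i) {
    EXPECT_NEAR(output1.data<T>()[i], output2.data<T>()[i], tolerance);
  }
}

With helpers of this shape, the call SetupTensor<float>(input, {batch_size, 13}, static_cast<float>(0), static_cast<float>(10)) in the test produces a non-negative 10x13 input batch, matching the comment that the input data should be >= 0, and CheckError verifies that the CPU and GPU inference results agree within a small tolerance.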