// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <fstream>
#include <memory>
#include <string>
#include <vector>

#include "paddle/fluid/framework/block_desc.h"
#include "paddle/fluid/framework/op_desc.h"
#include "paddle/fluid/framework/program_desc.h"
#include "paddle/fluid/framework/scope.h"
#include "paddle/fluid/inference/utils/singleton.h"
#include "test/cpp/inference/api/tester_helper.h"

namespace paddle {
namespace inference {

// One input sample: a flat float buffer plus its tensor shape.
struct Record {
  std::vector<float> data;
  std::vector<int32_t> shape;
};

// Parses a line of the form "<data>\t<shape>", where <data> is a
// space-separated list of floats and <shape> is a space-separated list of
// integer dimensions.
Record ProcessALine(const std::string &line) {
  VLOG(3) << "process a line";
  std::vector<std::string> columns;
  split(line, '\t', &columns);
  CHECK_EQ(columns.size(), 2UL)
      << "data format error, should be <data>\t<shape>";

  Record record;
  std::vector<std::string> data_strs;
  split(columns[0], ' ', &data_strs);
  for (auto &d : data_strs) {
    record.data.push_back(std::stof(d));
  }

  std::vector<std::string> shape_strs;
  split(columns[1], ' ', &shape_strs);
  for (auto &s : shape_strs) {
    record.shape.push_back(std::stoi(s));
  }
  VLOG(3) << "data size " << record.data.size();
  VLOG(3) << "data shape size " << record.shape.size();
  return record;
}

TEST(test_paddle_tensor, paddle_tensor) {
  std::unique_ptr<PaddlePredictor> predictor, analysis_predictor;
  AnalysisConfig config;
  const std::vector<std::string> passes;
  PaddlePassBuilder testPassBuilder(passes);
  config.SetModel(FLAGS_infer_model + "/__model__",
                  FLAGS_infer_model + "/__params__");
  predictor = CreatePaddlePredictor<NativeConfig>(config.ToNativeConfig());
  analysis_predictor = CreatePaddlePredictor(config);

  // Just a single batch of data.
  std::string line;
  std::ifstream file(FLAGS_infer_data);
  std::getline(file, line);
  auto record = ProcessALine(line);
  file.close();

  // Inference: feed the same PaddleTensor to both the native and the
  // analysis predictor.
  PaddleTensor input;
  input.shape = record.shape;
  input.data =
      PaddleBuf(record.data.data(), record.data.size() * sizeof(float));
  input.dtype = PaddleDType::FLOAT32;

  std::vector<PaddleTensor> output, analysis_output;
  predictor->Run({input}, &output, 1);
  analysis_predictor->Run({input}, &analysis_output, 1);
}

}  // namespace inference
}  // namespace paddle