// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gtest/gtest.h>

#include <iostream>
#include <vector>

#include "llvm/Support/raw_ostream.h"
#include "paddle/infrt/api/infrt_api.h"
#include "paddle/infrt/backends/host/phi_allocator.h"
#include "paddle/infrt/common/buffer.h"
#include "paddle/infrt/common/dtype.h"

using infrt::InfRtConfig;
using infrt::InfRtPredictor;
using infrt::CreateInfRtPredictor;

namespace infrt {

TEST(InfRtPredictor, predictor) {
  std::vector<std::string> shared_libs;

  InfRtConfig config;

  config.set_model_dir("@CMAKE_BINARY_DIR@/linear/linear.pdmodel");
  config.set_param_dir("@CMAKE_BINARY_DIR@/linear/linear.pdiparams");

  std::unique_ptr<InfRtPredictor> predictor = CreateInfRtPredictor(config);

  // Fill the input tensor (a batch of 16 flattened 28x28 images) with ones.
  ::infrt::backends::CpuPhiAllocator cpu_allocator;
  ::phi::DenseTensor* input = predictor->GetInput(0);
  input->Resize({16, 784});
  input->AllocateFrom(&cpu_allocator, ::phi::DataType::FLOAT32);
  auto* input_data = reinterpret_cast<float*>(input->data());
  for (int i = 0; i < input->numel(); i++) input_data[i] = 1.0;

  predictor->Run();

  // Get the output tensor.
  auto* output = predictor->GetOutput(0);

  // TODO(Shixiaowei02): Automatic result validation for training then
  // inference.
  // auto* output_data = reinterpret_cast<float*>(output->data());

  ASSERT_EQ(output->dims(), ::phi::DDim({16, 10}));
}

#ifdef INFRT_WITH_TRT
TEST(InfRtPredictor, trt_predictor) {
  std::vector<std::string> shared_libs;

  InfRtConfig config;
  config.enable_tensorrt();

  config.set_model_dir("@CMAKE_BINARY_DIR@/models/resnet50/model.pdmodel");
  config.set_param_dir("@CMAKE_BINARY_DIR@/models/resnet50/model.pdiparams");

  std::unique_ptr<InfRtPredictor> predictor = CreateInfRtPredictor(config);

  // Fill the input tensor (a batch of two 3x256x256 images) with ones.
  ::infrt::backends::CpuPhiAllocator cpu_allocator;
  ::phi::DenseTensor* input = predictor->GetInput(0);
  input->Resize({2, 3, 256, 256});
  input->AllocateFrom(&cpu_allocator, ::phi::DataType::FLOAT32);
  auto* input_data = reinterpret_cast<float*>(input->data());
  for (int i = 0; i < input->numel(); i++) input_data[i] = 1.0;

  predictor->Run();

  // Get the output tensor.
  auto* output = predictor->GetOutput(0);
  ASSERT_EQ(output->dims(), ::phi::DDim({2, 1000}));

  // Reference values sampled every 100 elements from the 2x1000 output;
  // both batch elements hold identical inputs, so the values repeat.
  const std::vector<float> true_vals{
      -3.319006264209747314e-01, -1.418896913528442383e+00,
      -6.934890151023864746e-01, -1.498023152351379395e+00,
      3.078042864799499512e-01,  -1.340998053550720215e+00,
      3.508620023727416992e+00,  2.274388313293457031e+00,
      -1.321727275848388672e+00, -8.888689428567886353e-02,
      -3.319006264209747314e-01, -1.418896913528442383e+00,
      -6.934890151023864746e-01, -1.498023152351379395e+00,
      3.078042864799499512e-01,  -1.340998053550720215e+00,
      3.508620023727416992e+00,  2.274388313293457031e+00,
      -1.321727275848388672e+00, -8.888689428567886353e-02};

  for (size_t i = 0; i < true_vals.size(); ++i) {
    CHECK_NEAR(output->data<float>()[i * 100], true_vals[i], 1e-5);
  }
}
#endif

}  // namespace infrt