// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "paddle/fluid/inference/api/onnxruntime_predictor.h"

#include <glog/logging.h>
#include <gtest/gtest.h>

#include <string>
#include <thread>  // NOLINT
#include <vector>

#include "paddle/fluid/framework/ir/pass.h"
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/inference/api/helper.h"
#include "paddle/fluid/inference/api/paddle_api.h"
#include "paddle/fluid/inference/api/paddle_inference_api.h"
#include "paddle/fluid/inference/tests/api/tester_helper.h"
#include "paddle/fluid/inference/utils/io_utils.h"
#include "paddle/fluid/platform/cpu_info.h"

DEFINE_string(dirname, "", "Directory of the inference model to test.");

namespace paddle {

TEST(ONNXRuntimePredictor, onnxruntime_on) {
  AnalysisConfig config;
  config.SetModel(FLAGS_dirname + "/inference.pdmodel",
                  FLAGS_dirname + "/inference.pdiparams");
  config.EnableONNXRuntime();
  config.EnableORTOptimization();
  config.SetCpuMathLibraryNumThreads(2);
  LOG(INFO) << config.Summary();

  auto _predictor =
      CreatePaddlePredictor<AnalysisConfig, PaddleEngineKind::kONNXRuntime>(
          config);
  ASSERT_TRUE(_predictor);
  auto* predictor = static_cast<ONNXRuntimePredictor*>(_predictor.get());

  ASSERT_TRUE(predictor);
  // Clone() is not supported by the ONNXRuntime predictor and returns null.
  ASSERT_TRUE(!predictor->Clone());

  // Dummy input data: a single 3x224x224 image filled with ones.
  std::vector<int64_t> input_shape = {-1, 3, 224, 224};
  std::vector<float> input_data(1 * 3 * 224 * 224, 1.0);
  std::vector<float> out_data;
  out_data.resize(1000);

  // Test all of the predictor's tensor interfaces.
  auto input_names = predictor->GetInputNames();
  auto output_names = predictor->GetOutputNames();
  auto get_input_shape = predictor->GetInputTensorShape();

  ASSERT_EQ(input_names.size(), 1UL);
  ASSERT_EQ(output_names.size(), 1UL);
  ASSERT_EQ(input_names[0], "inputs");
  ASSERT_EQ(output_names[0], "save_infer_model/scale_0.tmp_1");
  ASSERT_EQ(get_input_shape["inputs"], input_shape);

  auto input_tensor = predictor->GetInputTensor(input_names[0]);
  input_tensor->Reshape({1, 3, 224, 224});
  auto output_tensor = predictor->GetOutputTensor(output_names[0]);

  // Feed the input, run inference, and fetch the output.
  input_tensor->CopyFromCpu(input_data.data());
  ASSERT_TRUE(predictor->ZeroCopyRun());
  output_tensor->CopyToCpu(out_data.data());

  predictor->TryShrinkMemory();
}

}  // namespace paddle