From 72c370c8d25859635c06f43935d92b1f3782fa12 Mon Sep 17 00:00:00 2001
From: Tao Luo
Date: Tue, 12 May 2020 10:00:09 +0800
Subject: [PATCH] remove unused test_multi_thread_helper.h (#24399)

test=develop
---
 .../inference/api/paddle_analysis_config.h         |  2 +
 .../tests/test_multi_thread_helper.h               | 90 ---------------------
 2 files changed, 2 insertions(+), 90 deletions(-)
 delete mode 100644 paddle/fluid/inference/tests/test_multi_thread_helper.h

diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h
index a66f71e2a8..c09c2f413a 100644
--- a/paddle/fluid/inference/api/paddle_analysis_config.h
+++ b/paddle/fluid/inference/api/paddle_analysis_config.h
@@ -347,6 +347,8 @@ struct AnalysisConfig {
   ///
   /// \brief Set the cache capacity of different input shapes for MKLDNN.
   /// Default value 0 means not caching any shape.
+  /// Please see MKL-DNN Data Caching Design Document:
+  /// https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/mkldnn/caching/caching.md
   ///
   /// \param capacity The cache capacity.
   ///
diff --git a/paddle/fluid/inference/tests/test_multi_thread_helper.h b/paddle/fluid/inference/tests/test_multi_thread_helper.h
deleted file mode 100644
index 56745f115d..0000000000
--- a/paddle/fluid/inference/tests/test_multi_thread_helper.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License. */
-
-#pragma once
-
-#include <map>
-#include <string>
-#include <thread>  // NOLINT
-#include <vector>
-#include "paddle/fluid/framework/lod_tensor.h"
-#include "paddle/fluid/inference/io.h"
-
-void ThreadedRunInference(
-    const std::unique_ptr<paddle::framework::ProgramDesc>& inference_program,
-    paddle::framework::Executor* executor, paddle::framework::Scope* scope,
-    const int thread_id,
-    const std::vector<paddle::framework::LoDTensor*>& cpu_feeds,
-    const std::vector<paddle::framework::LoDTensor*>& cpu_fetchs) {
-  auto copy_program = std::unique_ptr<paddle::framework::ProgramDesc>(
-      new paddle::framework::ProgramDesc(*inference_program));
-
-  std::string feed_holder_name = "feed_" + paddle::string::to_string(thread_id);
-  std::string fetch_holder_name =
-      "fetch_" + paddle::string::to_string(thread_id);
-  copy_program->SetFeedHolderName(feed_holder_name);
-  copy_program->SetFetchHolderName(fetch_holder_name);
-
-  // 3. Get the feed_target_names and fetch_target_names
-  const std::vector<std::string>& feed_target_names =
-      copy_program->GetFeedTargetNames();
-  const std::vector<std::string>& fetch_target_names =
-      copy_program->GetFetchTargetNames();
-
-  // 4. Prepare inputs: set up maps for feed targets
-  std::map<std::string, const paddle::framework::LoDTensor*> feed_targets;
-  for (size_t i = 0; i < feed_target_names.size(); ++i) {
-    // Please make sure that cpu_feeds[i] is right for feed_target_names[i]
-    feed_targets[feed_target_names[i]] = cpu_feeds[i];
-  }
-
-  // 5. Define Tensor to get the outputs: set up maps for fetch targets
-  std::map<std::string, paddle::framework::LoDTensor*> fetch_targets;
-  for (size_t i = 0; i < fetch_target_names.size(); ++i) {
-    fetch_targets[fetch_target_names[i]] = cpu_fetchs[i];
-  }
-
-  // 6. Run the inference program
-  executor->Run(*copy_program, scope, feed_targets, fetch_targets, true,
-                feed_holder_name, fetch_holder_name);
-}
-
-template <typename Place>
-void TestMultiThreadInference(
-    const std::string& dirname,
-    const std::vector<std::vector<paddle::framework::LoDTensor*>>& cpu_feeds,
-    const std::vector<std::vector<paddle::framework::LoDTensor*>>& cpu_fetchs,
-    const int num_threads) {
-  // 1. Define place, executor, scope
-  auto place = Place();
-  auto executor = paddle::framework::Executor(place);
-  auto* scope = new paddle::framework::Scope();
-
-  // 2. Initialize the inference_program and load parameters
-  std::unique_ptr<paddle::framework::ProgramDesc> inference_program =
-      paddle::inference::Load(executor, *scope, dirname);
-
-  std::vector<std::thread*> threads;
-  for (int i = 0; i < num_threads; ++i) {
-    threads.push_back(new std::thread(
-        ThreadedRunInference, std::ref(inference_program), &executor, scope, i,
-        std::ref(cpu_feeds[i]), std::ref(cpu_fetchs[i])));
-  }
-  for (int i = 0; i < num_threads; ++i) {
-    threads[i]->join();
-    delete threads[i];
-  }
-
-  delete scope;
-}
--
GitLab
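
Usage sketch for the SetMkldnnCacheCapacity setting whose doc comment this patch extends, assuming the standalone C++ inference API declared in paddle_inference_api.h; the model directory and the main() scaffolding are illustrative placeholders, not taken from this patch.

    #include "paddle/fluid/inference/api/paddle_inference_api.h"

    int main() {
      paddle::AnalysisConfig config;
      config.SetModel("./mobilenet_model");  // hypothetical model directory
      config.EnableMKLDNN();
      // Cache MKL-DNN primitives for up to 10 distinct input shapes;
      // the default capacity of 0 disables shape caching.
      config.SetMkldnnCacheCapacity(10);
      auto predictor = paddle::CreatePaddlePredictor(config);
      // ... set input tensors and run the predictor ...
      return 0;
    }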