Unverified · Commit 3123d187 · Authored by: Tao Luo · Committed by: GitHub

remove unused AnalysisPredictor::SetMkldnnThreadID() (#18444)

test=develop
Parent: a873fa84
@@ -185,14 +185,6 @@ bool AnalysisPredictor::PrepareExecutor() {
   return true;
 }
 
-void AnalysisPredictor::SetMkldnnThreadID(int tid) {
-#ifdef PADDLE_WITH_MKLDNN
-  platform::set_cur_thread_id(tid);
-#else
-  LOG(ERROR) << "Please compile with MKLDNN first to use MKLDNN";
-#endif
-}
-
 bool AnalysisPredictor::Run(const std::vector<PaddleTensor> &inputs,
                             std::vector<PaddleTensor> *output_data,
                             int batch_size) {
......
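For context, the deleted method was a thin compile-time-guarded wrapper: with MKLDNN enabled it forwarded to platform::set_cur_thread_id(tid), which stores a thread-local id for MKLDNN-side state; without MKLDNN it only logged an error. A minimal self-contained sketch of that guard pattern, where MYAPP_WITH_MKLDNN and the thread-local id are hypothetical stand-ins, not Paddle's actual build flag or internals:

// Hypothetical stand-alone sketch of the guard pattern used by the removed
// method; MYAPP_WITH_MKLDNN and the thread-local id are illustrative only.
#include <iostream>

#ifdef MYAPP_WITH_MKLDNN
namespace platform {
// Thread-local id that per-thread MKLDNN caches could key on.
thread_local int cur_thread_id = -1;
void set_cur_thread_id(int tid) { cur_thread_id = tid; }
}  // namespace platform
#endif

void SetMkldnnThreadID(int tid) {
#ifdef MYAPP_WITH_MKLDNN
  platform::set_cur_thread_id(tid);
#else
  // Without the MKLDNN build flag the call is a no-op apart from the log.
  std::cerr << "Please compile with MKLDNN first to use MKLDNN\n";
  (void)tid;
#endif
}

int main() {
  SetMkldnnThreadID(1);  // logs an error unless built with -DMYAPP_WITH_MKLDNN
}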
@@ -80,8 +80,6 @@ class AnalysisPredictor : public PaddlePredictor {
   framework::Scope *scope() { return scope_.get(); }
   framework::ProgramDesc &program() { return *inference_program_; }
 
-  void SetMkldnnThreadID(int tid);
-
   std::string GetSerializedProgram() const override;
 
   bool MkldnnQuantize();
......
@@ -144,7 +144,7 @@ std::shared_ptr<std::vector<PaddleTensor>> GetWarmupData(
   int test_data_batch_size = test_data[0][0].shape[0];
   auto iterations = test_data.size();
   PADDLE_ENFORCE(
-      static_cast<int32_t>(num_images) <= iterations * test_data_batch_size,
+      static_cast<size_t>(num_images) <= iterations * test_data_batch_size,
       "The requested quantization warmup data size " +
           std::to_string(num_images) + " is bigger than all test data size.");
......
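The one-line change above tightens a signed/unsigned comparison: iterations comes from test_data.size() and is a size_t, so iterations * test_data_batch_size is unsigned, and comparing an int32_t against it relies on an implicit conversion that compilers flag with -Wsign-compare. A minimal sketch of why casting the left side to size_t is the cleaner fix; the values are made up and merely stand in for num_images, iterations, and test_data_batch_size:

// Minimal illustration of the signed/unsigned comparison the hunk fixes;
// the numbers are hypothetical, not taken from the Paddle test data.
#include <cstdint>
#include <cstdio>

int main() {
  std::size_t iterations = 5;     // like test_data.size()
  int test_data_batch_size = 10;  // like test_data[0][0].shape[0]
  std::int32_t num_images = 40;

  // size_t * int promotes the int operand, so the product is size_t.
  std::size_t available = iterations * test_data_batch_size;

  // Comparing int32_t <= size_t converts the signed side to unsigned, which
  // draws -Wsign-compare; a negative num_images would wrap to a huge value
  // and silently pass. The explicit cast states the intended comparison.
  bool ok = static_cast<std::size_t>(num_images) <= available;
  std::printf("warmup size fits: %s\n", ok ? "yes" : "no");
}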
@@ -418,12 +418,6 @@ void TestMultiThreadPrediction(
       // The inputs of each thread are all the same.
       std::vector<std::vector<PaddleTensor>> outputs_tid;
       auto &predictor = predictors[tid];
-#ifdef PADDLE_WITH_MKLDNN
-      if (use_analysis) {
-        static_cast<AnalysisPredictor *>(predictor.get())
-            ->SetMkldnnThreadID(static_cast<int>(tid) + 1);
-      }
-#endif
       if (FLAGS_warmup) {
         PredictionWarmUp(predictor.get(), inputs, &outputs_tid, num_threads,
                          tid);
......
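The last hunk drops the per-thread MKLDNN id (tid + 1) that the multi-threaded test used to push into each AnalysisPredictor. For orientation, a hedged sketch of the one-predictor-per-thread shape of that test loop; Predictor, RunOnce, and the setup code are illustrative stand-ins, not the actual tester_helper code:

// Hypothetical sketch of the one-predictor-per-thread test pattern that the
// removed #ifdef block lived in; all names here are stand-ins.
#include <cstdio>
#include <memory>
#include <thread>
#include <vector>

struct Predictor {
  void RunOnce(int tid) { std::printf("thread %d ran its predictor\n", tid); }
};

int main() {
  const int num_threads = 4;
  std::vector<std::unique_ptr<Predictor>> predictors;
  for (int i = 0; i < num_threads; ++i)
    predictors.emplace_back(new Predictor);

  std::vector<std::thread> threads;
  for (int tid = 0; tid < num_threads; ++tid) {
    threads.emplace_back([&predictors, tid] {
      auto &predictor = predictors[tid];
      // Before this commit, an MKLDNN build would tag the predictor here
      // with SetMkldnnThreadID(tid + 1); that per-thread id is now gone.
      predictor->RunOnce(tid);
    });
  }
  for (auto &t : threads) t.join();
}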