diff --git a/lite/api/benchmark.cc b/lite/api/benchmark.cc
index c137324b576f9f9399669a5e68d948b9921e4866..50a2797d0f57e7a1a78a40357c9a5843f176d0e4 100644
--- a/lite/api/benchmark.cc
+++ b/lite/api/benchmark.cc
@@ -44,10 +44,7 @@ void OutputOptModel(const std::string& load_model_dir,
                     const std::vector<std::vector<int64_t>>& input_shapes) {
   lite_api::CxxConfig config;
   config.set_model_dir(load_model_dir);
-  std::vector<Place> vaild_places = {
-      Place{TARGET(kARM), PRECISION(kFloat)},
-      Place{TARGET(kX86), PRECISION(kFloat)},
-  };
+  std::vector<Place> vaild_places = {Place{TARGET(kARM), PRECISION(kFloat)}};
   if (FLAGS_is_quantized_model) {
     vaild_places.insert(vaild_places.begin(),
                         Place{TARGET(kARM), PRECISION(kInt8)});
diff --git a/lite/api/model_test.cc b/lite/api/model_test.cc
index 1358267000991c81b80453669cf46638449b8a7b..aadb8df755e5bf7fc6d0d6eb759a8a69afb97389 100644
--- a/lite/api/model_test.cc
+++ b/lite/api/model_test.cc
@@ -46,10 +46,7 @@ void OutputOptModel(const std::string& load_model_dir,
                     const std::vector<std::vector<int64_t>>& input_shapes) {
   lite_api::CxxConfig config;
   config.set_model_dir(load_model_dir);
-  config.set_valid_places({
-      Place{TARGET(kX86), PRECISION(kFloat)},
-      Place{TARGET(kARM), PRECISION(kFloat)},
-  });
+  config.set_valid_places({Place{TARGET(kARM), PRECISION(kFloat)}});
   auto predictor = lite_api::CreatePaddlePredictor(config);

   // delete old optimized model