diff --git a/lite/api/model_test.cc b/lite/api/model_test.cc
index b0f7a0479f0db91b816838f9d0ee1cc31b9b232a..898521a6fc0bf6d1f02ffa9a04f4dc6c4ea02612 100644
--- a/lite/api/model_test.cc
+++ b/lite/api/model_test.cc
@@ -44,9 +44,15 @@ void OutputOptModel(const std::string& load_model_dir,
                     const std::vector<std::vector<int64_t>>& input_shapes) {
   lite_api::CxxConfig config;
   config.set_model_dir(load_model_dir);
+#ifdef LITE_WITH_X86
+  config.set_valid_places({Place{TARGET(kX86), PRECISION(kFloat)},
+                           Place{TARGET(kX86), PRECISION(kInt64)},
+                           Place{TARGET(kHost), PRECISION(kFloat)}});
+#else
   config.set_valid_places({
       Place{TARGET(kARM), PRECISION(kFloat)},
   });
+#endif
   auto predictor = lite_api::CreatePaddlePredictor(config);
 
   // delete old optimized model
diff --git a/lite/api/opt.cc b/lite/api/opt.cc
index efad7b74e943c29c9af1af5c14ac51621eefe576..1a2369395115b5db388cc1de44a1ff87e6d26b57 100644
--- a/lite/api/opt.cc
+++ b/lite/api/opt.cc
@@ -104,7 +104,8 @@ std::vector<Place> ParserValidPlaces() {
     valid_places.emplace_back(
         TARGET(kARM));  // enable kARM CPU kernel when no opencl kernel
   } else if (target_repr == "x86") {
-    valid_places.emplace_back(TARGET(kX86));
+    valid_places.emplace_back(Place{TARGET(kX86), PRECISION(kFloat)});
+    valid_places.emplace_back(Place{TARGET(kX86), PRECISION(kInt64)});
   } else if (target_repr == "npu") {
     valid_places.emplace_back(TARGET(kNPU));
   } else if (target_repr == "xpu") {
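Usage sketch (not part of the patch): the snippet below shows how a client built with LITE_WITH_X86 could create a predictor with the same kFloat/kInt64 valid places this change registers in OutputOptModel(). The include path, the fully qualified paddle::lite_api namespace, and the model directory are assumptions based on typical lite_api usage, not taken verbatim from this diff; the CxxConfig/Place/CreatePaddlePredictor calls mirror the ones visible in the hunk above.

#include "lite/api/paddle_api.h"  // assumed public header for the lite_api types

int main() {
  paddle::lite_api::CxxConfig config;
  config.set_model_dir("/path/to/model_dir");  // hypothetical model directory
  // Same valid places as the patched OutputOptModel() under LITE_WITH_X86:
  // float and int64 kernels on x86, with a float host fallback.
  config.set_valid_places(
      {paddle::lite_api::Place{TARGET(kX86), PRECISION(kFloat)},
       paddle::lite_api::Place{TARGET(kX86), PRECISION(kInt64)},
       paddle::lite_api::Place{TARGET(kHost), PRECISION(kFloat)}});
  auto predictor = paddle::lite_api::CreatePaddlePredictor(config);
  return 0;
}

The opt.cc hunk makes the standalone opt tool consistent with this: when "x86" is parsed as a valid target, ParserValidPlaces() now registers both the kFloat and kInt64 precisions instead of a bare TARGET(kX86), so int64 kernels (e.g. for index or lookup ops) are considered during optimization.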