diff --git a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
index 6c38809b4321530477829487af0a79f273d08f32..a8c29579e12e7fba1f7881cbce26c53da1e80aab 100644
--- a/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
+++ b/paddle/fluid/inference/analysis/ir_passes/lite_subgraph_pass.cc
@@ -274,7 +274,9 @@ void LiteSubgraphPass::SetUpEngine(
   } else if (use_xpu) {
     target_type = TARGET(kXPU);
   } else if (use_nnadapter) {
+#ifdef LITE_WITH_NNADAPTER
     target_type = TARGET(kNNAdapter);
+#endif
   } else {
 #ifdef PADDLE_WITH_ARM
     target_type = TARGET(kARM);
diff --git a/paddle/fluid/inference/api/paddle_analysis_config.h b/paddle/fluid/inference/api/paddle_analysis_config.h
index d6a0b643c2aeee41d501034462aaf6a5f48b9f27..f381b5fb23e4b81b09fa58ff038b2e90f9470c1f 100644
--- a/paddle/fluid/inference/api/paddle_analysis_config.h
+++ b/paddle/fluid/inference/api/paddle_analysis_config.h
@@ -823,7 +823,7 @@ struct PD_INFER_DECL AnalysisConfig {
   // XPU related.
   bool use_xpu_{false};
   int xpu_device_id_{0};
-  int xpu_l3_workspace_size_;
+  int xpu_l3_workspace_size_{0};
   bool xpu_locked_;
   bool xpu_autotune_;
   std::string xpu_autotune_file_;
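
The two hunks above use two common C++ patterns: a conditional compilation guard so that `TARGET(kNNAdapter)` is only referenced when the build defines `LITE_WITH_NNADAPTER`, and an in-class default member initializer so that `xpu_l3_workspace_size_` is never read uninitialized. Below is a minimal standalone sketch of both patterns, not Paddle code; the names `DemoConfig` and `DEMO_WITH_NNADAPTER` are hypothetical stand-ins chosen for illustration.

```cpp
#include <iostream>

// Hypothetical config struct mirroring the paddle_analysis_config.h fix:
// without the `{0}`, `workspace_size` would be left indeterminate whenever a
// constructor does not set it, and reading it would be undefined behavior.
struct DemoConfig {
  bool use_xpu{false};
  int workspace_size{0};  // default member initializer, as in xpu_l3_workspace_size_{0}
};

int main() {
  DemoConfig cfg;
  // Safe to read: workspace_size is guaranteed to be 0.
  std::cout << "workspace size: " << cfg.workspace_size << "\n";

  // Conditional compilation guard, analogous to the lite_subgraph_pass.cc hunk:
  // the backend-specific branch only compiles when the build defines the macro.
#ifdef DEMO_WITH_NNADAPTER
  std::cout << "NNAdapter target selected\n";
#else
  std::cout << "NNAdapter support not compiled in\n";
#endif
  return 0;
}
```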