diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 8f51613d7f4fc5aefe1ddf7a8ad2c6863268946b..81c68a65576ca4cbfd58e915cae58f796115d1fe 100644 --- a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -653,6 +653,13 @@ std::unique_ptr CreatePaddlePredictor< process_level_allocator_enabled = true; } +// TODO(wilber): jetson tx2 may fail to run the model due to insufficient memory +// under the naive_best_fit strategy. Modify the default allocation strategy to +// auto_growth. TODO: find a more appropriate way to solve the problem. +#ifdef WITH_NV_JETSON + gflags.push_back("--allocator_strategy=auto_growth"); +#endif + if (framework::InitGflags(gflags)) { VLOG(3) << "The following gpu analysis configurations only take effect " "for the first predictor: ";