From f277f53c33002287e3454f94f83bbb682dcd2c0f Mon Sep 17 00:00:00 2001
From: nhzlx
Date: Thu, 20 Sep 2018 08:21:02 +0000
Subject: [PATCH] out of memory... i bet it's the last time commit for this pr

---
 paddle/fluid/inference/tests/api/trt_models_tester.cc | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/paddle/fluid/inference/tests/api/trt_models_tester.cc b/paddle/fluid/inference/tests/api/trt_models_tester.cc
index 966f21c437f..cb6c2991e42 100644
--- a/paddle/fluid/inference/tests/api/trt_models_tester.cc
+++ b/paddle/fluid/inference/tests/api/trt_models_tester.cc
@@ -26,7 +26,7 @@ NativeConfig GetConfigNative() {
   NativeConfig config;
   config.model_dir = FLAGS_dirname;
   // LOG(INFO) << "dirname  " << config.model_dir;
-  config.fraction_of_gpu_memory = 0.7;
+  config.fraction_of_gpu_memory = 0.45;
   config.use_gpu = true;
   config.device = 0;
   return config;
@@ -36,7 +36,7 @@ TensorRTConfig GetConfigTRT() {
   TensorRTConfig config;
   config.model_dir = FLAGS_dirname;
   config.use_gpu = true;
-  config.fraction_of_gpu_memory = 0.1;
+  config.fraction_of_gpu_memory = 0.2;
   config.device = 0;
   config.max_batch_size = 3;
   return config;
@@ -99,7 +99,7 @@ TEST(trt_models_test, main) {
   std::vector<std::string> infer_models = {"mobilenet", "resnet50",
                                            "resnext50"};
   for (auto &model_dir : infer_models) {
-    CompareTensorRTWithFluid(5, FLAGS_dirname + "/" + model_dir);
+    CompareTensorRTWithFluid(1, FLAGS_dirname + "/" + model_dir);
   }
 }
 }  // namespace paddle
-- 
GitLab