diff --git a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc index defa0a525f6885a6206f54e6a27190ba181b74a2..5132b3b5e72ca24c04e53d0157d33676d45b2a2a 100644 --- a/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc +++ b/paddle/fluid/inference/analysis/passes/memory_optimize_pass.cc @@ -96,6 +96,7 @@ void MemoryOptimizePass::CollectVarMemorySize( const int fake_batch_size = 1; auto valid_var = [&](framework::ir::Node* node) -> bool { + // Memory reuse on LoD-related operators may cause unknown errors. std::set<std::string> invalid_op = {"while", "conditional_block", "tensorrt_engine", @@ -103,6 +104,7 @@ void MemoryOptimizePass::CollectVarMemorySize( "merge_lod_tensor_infer", "merge_lod_tensor", "equal", + "sequence_pool", "lod_reset"}; for (auto* tmp : node->inputs) { CHECK(tmp->IsOp());