diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 8bfb0b327267b461a5bb41cc5befe385f9637824..f794089d30ac21de22894774f13d4d2871213573 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -170,7 +170,7 @@ struct drm_mm_node *
 __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
 {
 	return drm_mm_interval_tree_iter_first((struct rb_root *)&mm->interval_tree,
-					       start, last);
+					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
 }
 EXPORT_SYMBOL(__drm_mm_interval_first);
 
diff --git a/drivers/gpu/drm/selftests/test-drm_mm.c b/drivers/gpu/drm/selftests/test-drm_mm.c
index 1e71bc182ca96a6bda444650f3f30db15ce95df8..2958f596081ee819399e6c1d068648c7ba154657 100644
--- a/drivers/gpu/drm/selftests/test-drm_mm.c
+++ b/drivers/gpu/drm/selftests/test-drm_mm.c
@@ -839,16 +839,18 @@ static bool assert_contiguous_in_range(struct drm_mm *mm,
 		n++;
 	}
 
-	drm_mm_for_each_node_in_range(node, mm, 0, start) {
-		if (node) {
+	if (start > 0) {
+		node = __drm_mm_interval_first(mm, 0, start - 1);
+		if (node->allocated) {
 			pr_err("node before start: node=%llx+%llu, start=%llx\n",
 			       node->start, node->size, start);
 			return false;
 		}
 	}
 
-	drm_mm_for_each_node_in_range(node, mm, end, U64_MAX) {
-		if (node) {
+	if (end < U64_MAX) {
+		node = __drm_mm_interval_first(mm, end, U64_MAX);
+		if (node->allocated) {
 			pr_err("node after end: node=%llx+%llu, end=%llx\n",
 			       node->start, node->size, end);
 			return false;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index d81b0ba9921fb5d855c28ed672e0352a856214c9..f262da18011723b1f504321838d575a7274822be 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -459,10 +459,13 @@ __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
  * but using the internal interval tree to accelerate the search for the
  * starting node, and so not safe against removal of elements. It assumes
  * that @end is within (or is the upper limit of) the drm_mm allocator.
+ * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
+ * over the special _unallocated_ &drm_mm.head_node, and may even continue
+ * indefinitely.
  */
 #define drm_mm_for_each_node_in_range(node__, mm__, start__, end__)	\
 	for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
-	     node__ && node__->start < (end__);				\
+	     node__->start < (end__);					\
 	     node__ = list_next_entry(node__, node_list))
 
 void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
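
Illustrative note (not part of the patch): the trick above is that
__drm_mm_interval_first() now returns the drm_mm's head_node as a sentinel
instead of NULL, so drm_mm_for_each_node_in_range() can drop the per-iteration
NULL check and terminate purely on the "start < end" test. Below is a minimal,
self-contained sketch of that sentinel pattern; every name in it (struct node,
struct mm, range_first, for_each_node_in_range) is invented for illustration
and none of it is kernel code.

/*
 * Standalone sketch of sentinel-terminated range iteration.
 * Not kernel code: types and helpers are invented for this example.
 */
#include <stdio.h>
#include <stdint.h>

struct node {
	uint64_t start, size;
	struct node *next;	/* circular list, closed by the sentinel */
};

struct mm {
	struct node head;	/* sentinel: head.start == end of managed range */
	uint64_t start, size;
};

/*
 * Stand-in for __drm_mm_interval_first(): return the first node whose
 * interval touches [start, last], or the sentinel if there is none.
 * The real code uses an interval tree; a linear walk keeps this short.
 */
static struct node *range_first(struct mm *mm, uint64_t start, uint64_t last)
{
	struct node *n;

	for (n = mm->head.next; n != &mm->head; n = n->next)
		if (n->start <= last && n->start + n->size > start)
			return n;

	return &mm->head;	/* sentinel instead of NULL, as in the patch */
}

/*
 * No NULL test in the loop condition: the sentinel's start is one past the
 * managed range, so it fails "start < end" and stops the walk -- provided
 * end does not exceed the managed range, which is exactly the caveat the
 * patch adds to the drm_mm_for_each_node_in_range() kerneldoc.
 */
#define for_each_node_in_range(n, mm, s, e)			\
	for ((n) = range_first((mm), (s), (e) - 1);		\
	     (n)->start < (e);					\
	     (n) = (n)->next)

int main(void)
{
	struct mm mm = { .start = 0, .size = 100 };
	struct node a = { .start = 10, .size = 20 };
	struct node b = { .start = 50, .size = 10 };
	struct node *n;

	mm.head.start = mm.start + mm.size;	/* 100: terminates the walk */
	mm.head.next = &a;
	a.next = &b;
	b.next = &mm.head;

	for_each_node_in_range(n, &mm, 0, 60)	/* prints 10+20 and 50+10 */
		printf("node %llu+%llu\n",
		       (unsigned long long)n->start,
		       (unsigned long long)n->size);

	/* Empty range: range_first() returns the sentinel, body never runs. */
	for_each_node_in_range(n, &mm, 70, 100)
		printf("unexpected node %llu\n", (unsigned long long)n->start);

	return 0;
}

The design choice mirrored here is that the circular node list is closed by a
sentinel whose start equals the end of the managed range, so one comparison
both bounds and terminates the walk. It also shows why the new kerneldoc
warning is needed: pass an end beyond the managed range and the test succeeds
on the unallocated sentinel, and the walk wraps around indefinitely.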