diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index e8a083743e09276920a009dab4a6eeea79138cc9..49ce5777c51152804046b98d804922c311fc43b4 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -255,8 +255,30 @@ static void *i915_gem_object_map(struct drm_i915_gem_object *obj,
 		return NULL;
 
 	/* A single page can always be kmapped */
-	if (n_pte == 1 && type == I915_MAP_WB)
-		return kmap(sg_page(sgt->sgl));
+	if (n_pte == 1 && type == I915_MAP_WB) {
+		struct page *page = sg_page(sgt->sgl);
+
+		/*
+		 * On 32b, highmem uses a finite set of indirect PTE (i.e.
+		 * vmap) to provide virtual mappings of the high pages.
+		 * As these are finite, map_new_virtual() must wait for some
+		 * other kmap() to finish when it runs out. If we map a large
+		 * number of objects, there is no method for it to tell us
+		 * to release the mappings, and we deadlock.
+		 *
+		 * However, if we make an explicit vmap of the page, that
+		 * uses a larger vmalloc arena, and also has the ability
+		 * to tell us to release unwanted mappings. Most importantly,
+		 * it will fail and propagate an error instead of waiting
+		 * forever.
+		 *
+		 * So if the page is beyond the 32b boundary, make an explicit
+		 * vmap. On 64b, this check will be optimised away as we can
+		 * directly kmap any page on the system.
+		 */
+		if (!PageHighMem(page))
+			return kmap(page);
+	}
 
 	mem = stack;
 	if (n_pte > ARRAY_SIZE(stack)) {
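
For context on the strategy the new comment describes, here is a minimal standalone sketch, not part of the patch itself; map_single_page_wb() and unmap_single_page() are hypothetical helper names chosen for illustration. It shows the same decision in isolation: kmap() a single lowmem page directly, but give a highmem page an explicit vmap() from the vmalloc arena, so that running out of mappings returns NULL to the caller instead of sleeping in map_new_virtual().

/* Hypothetical sketch for illustration only; not from the i915 patch. */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

/*
 * Map one page with write-back caching. A lowmem page uses kmap(), which
 * cannot fail. A highmem page gets an explicit vmap(), which draws from
 * the larger vmalloc arena and returns NULL on exhaustion rather than
 * blocking in map_new_virtual() the way kmap() of a highmem page can.
 */
static void *map_single_page_wb(struct page *page)
{
	if (!PageHighMem(page))
		return kmap(page);

	return vmap(&page, 1, 0, PAGE_KERNEL);
}

static void unmap_single_page(struct page *page, void *vaddr)
{
	if (!PageHighMem(page))
		kunmap(page);
	else
		vunmap(vaddr);
}

In the patch itself no such helper exists: when PageHighMem(page) is true the new branch simply does not return, so execution falls through past the closing brace to the existing multi-page vmap path starting at "mem = stack;". On 64b, PageHighMem() is compile-time false, so the whole check reduces to the original unconditional kmap().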