diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index dc9bf5282071360a62fc25539ff8f0a1ebff1bc7..07046fa4c6a9fff118bf4cc8c299445d9fe0791d 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -488,6 +488,37 @@ __i915_gem_userptr_set_active(struct drm_i915_gem_object *obj,
 	return ret;
 }
 
+static bool noncontiguous_or_overlaps_ggtt(struct drm_i915_gem_object *obj,
+					   struct mm_struct *mm)
+{
+	const struct vm_operations_struct *gem_vm_ops =
+		obj->base.dev->driver->gem_vm_ops;
+	unsigned long addr = obj->userptr.ptr;
+	const unsigned long end = addr + obj->base.size;
+	struct vm_area_struct *vma;
+
+	/* Check for a contiguous set of vma covering the userptr; if any
+	 * are absent, the gup will EFAULT. More importantly, if any point
+	 * back to a drm_i915_gem_object GTT mmapping, we may trigger a
+	 * deadlock between the deferred gup of this userptr and the object
+	 * being unbound calling invalidate_range -> cancel_userptr.
+	 */
+	for (vma = find_vma(mm, addr); vma; vma = vma->vm_next) {
+		if (vma->vm_start > addr) /* gap */
+			break;
+
+		if (vma->vm_ops == gem_vm_ops) /* GTT mmapping */
+			break;
+
+		if (vma->vm_end >= end) /* whole range covered */
+			return false;
+
+		addr = vma->vm_end;
+	}
+
+	return true;
+}
+
 static void
 __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 {
@@ -556,8 +587,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 }
 
 static struct sg_table *
-__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
-				      bool *active)
+__i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj)
 {
 	struct get_pages_work *work;
 
@@ -594,7 +624,6 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj,
 	INIT_WORK(&work->work, __i915_gem_userptr_get_pages_worker);
 	schedule_work(&work->work);
 
-	*active = true;
 	return ERR_PTR(-EAGAIN);
 }
 
@@ -602,10 +631,11 @@ static struct sg_table *
 i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 {
 	const int num_pages = obj->base.size >> PAGE_SHIFT;
+	struct mm_struct *mm = obj->userptr.mm->mm;
 	struct page **pvec;
 	struct sg_table *pages;
-	int pinned, ret;
 	bool active;
+	int pinned;
 
 	/* If userspace should engineer that these pages are replaced in
 	 * the vma between us binding this page into the GTT and completion
@@ -632,37 +662,43 @@ i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 			return ERR_PTR(-EAGAIN);
 	}
 
-	/* Let the mmu-notifier know that we have begun and need cancellation */
-	ret = __i915_gem_userptr_set_active(obj, true);
-	if (ret)
-		return ERR_PTR(ret);
-
 	pvec = NULL;
 	pinned = 0;
-	if (obj->userptr.mm->mm == current->mm) {
-		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
-				      GFP_TEMPORARY);
-		if (pvec == NULL) {
-			__i915_gem_userptr_set_active(obj, false);
-			return ERR_PTR(-ENOMEM);
-		}
 
-		pinned = __get_user_pages_fast(obj->userptr.ptr, num_pages,
-					       !obj->userptr.read_only, pvec);
+	down_read(&mm->mmap_sem); /* required for the vma walk */
+	if (unlikely(noncontiguous_or_overlaps_ggtt(obj, mm))) {
+		pinned = -EFAULT;
+	} else if (mm == current->mm) {
+		pvec = drm_malloc_gfp(num_pages, sizeof(struct page *),
+				      GFP_TEMPORARY |
+				      __GFP_NORETRY |
+				      __GFP_NOWARN);
+		if (pvec) /* defer to worker if malloc fails */
+			pinned = __get_user_pages_fast(obj->userptr.ptr,
+						       num_pages,
+						       !obj->userptr.read_only,
+						       pvec);
 	}
 
 	active = false;
-	if (pinned < 0)
-		pages = ERR_PTR(pinned), pinned = 0;
-	else if (pinned < num_pages)
-		pages = __i915_gem_userptr_get_pages_schedule(obj, &active);
-	else
+	if (pinned < 0) {
+		pages = ERR_PTR(pinned);
+		pinned = 0;
+	} else if (pinned < num_pages) {
+		pages = __i915_gem_userptr_get_pages_schedule(obj);
+		active = pages == ERR_PTR(-EAGAIN); /* worker queued */
+	} else {
 		pages = __i915_gem_userptr_set_pages(obj, pvec, num_pages);
-	if (IS_ERR(pages)) {
-		__i915_gem_userptr_set_active(obj, active);
-		release_pages(pvec, pinned, 0);
+		active = !IS_ERR(pages);
 	}
+	if (active) /* let the mmu-notifier know we need cancellation */
+		__i915_gem_userptr_set_active(obj, true);
+	up_read(&mm->mmap_sem);
+
+	if (IS_ERR(pages))
+		release_pages(pvec, pinned, 0);
 	drm_free_large(pvec);
+
 	return pages;
 }
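
As an illustration of the case the new vma walk rejects, here is a minimal userspace sketch (not part of the patch; the device node, include path, object size and minimal error handling are assumptions): it creates a GEM object, maps it through the GTT, then registers that mapping as a userptr. Creating the userptr object may still succeed, but the first attempt to pin its backing pages in i915_gem_userptr_get_pages() now returns -EFAULT instead of risking the invalidate_range -> cancel_userptr deadlock described in the comment above.

/* Hypothetical reproducer for the rejected case; not part of the patch. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/i915_drm.h>	/* uapi header; install path may differ */

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR); /* assumed i915 node */
	struct drm_i915_gem_create create = { .size = 4096 };
	struct drm_i915_gem_mmap_gtt map = { 0 };
	struct drm_i915_gem_userptr userptr = { 0 };
	void *gtt;

	if (fd < 0)
		return 1;

	/* Create a GEM object and map it through the GTT. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return 1;
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &map))
		return 1;
	gtt = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		   fd, map.offset);
	if (gtt == MAP_FAILED)
		return 1;

	/* Hand the GTT mmapping back as a userptr.  The vma for this range
	 * has vm_ops == gem_vm_ops; the USERPTR ioctl itself may succeed,
	 * but the -EFAULT surfaces once the object's pages are first pinned
	 * (e.g. a later execbuf against userptr.handle).
	 */
	userptr.user_ptr = (uintptr_t)gtt;
	userptr.user_size = 4096;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr))
		perror("I915_GEM_USERPTR");

	munmap(gtt, 4096);
	close(fd);
	return 0;
}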