diff --git a/mm/shmem.c b/mm/shmem.c
index 67afba5117f2ebe80ef54e487d068fdf99e986bb..dc12264f44f571ea6c88c8e2d5ad42ce1c7087bd 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1145,8 +1145,20 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
 		if (!error) {
 			error = shmem_add_to_page_cache(page, mapping, index,
 						gfp, swp_to_radix_entry(swap));
-			/* We already confirmed swap, and make no allocation */
-			VM_BUG_ON(error);
+			/*
+			 * We already confirmed swap under page lock, and make
+			 * no memory allocation here, so usually no possibility
+			 * of error; but free_swap_and_cache() only trylocks a
+			 * page, so it is just possible that the entry has been
+			 * truncated or holepunched since swap was confirmed.
+			 * shmem_undo_range() will have done some of the
+			 * unaccounting, now delete_from_swap_cache() will do
+			 * the rest (including mem_cgroup_uncharge_swapcache).
+			 * Reset swap.val? No, leave it so "failed" goes back to
+			 * "repeat": reading a hole and writing should succeed.
+			 */
+			if (error)
+				delete_from_swap_cache(page);
 		}
 		if (error)
 			goto failed;