提交 51726b12 编写于 作者: H Hugh Dickins 提交者: Linus Torvalds

mm: replace some BUG_ONs by VM_BUG_ONs

The swap code is over-provisioned with BUG_ONs on assorted page flags,
mostly dating back to 2.3.  They're good documentation, and guard against
developer error, but a waste of space on most systems: change them to
VM_BUG_ONs, conditional on CONFIG_DEBUG_VM.  Just delete the PagePrivate
ones: they're later, from 2.5.69, but even less interesting now.
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
上级 6d91add0
...@@ -125,8 +125,8 @@ int swap_readpage(struct file *file, struct page *page) ...@@ -125,8 +125,8 @@ int swap_readpage(struct file *file, struct page *page)
struct bio *bio; struct bio *bio;
int ret = 0; int ret = 0;
BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageLocked(page));
BUG_ON(PageUptodate(page)); VM_BUG_ON(PageUptodate(page));
bio = get_swap_bio(GFP_KERNEL, page_private(page), page, bio = get_swap_bio(GFP_KERNEL, page_private(page), page,
end_swap_bio_read); end_swap_bio_read);
if (bio == NULL) { if (bio == NULL) {
......
...@@ -72,10 +72,10 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) ...@@ -72,10 +72,10 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{ {
int error; int error;
BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageLocked(page));
BUG_ON(PageSwapCache(page)); VM_BUG_ON(PageSwapCache(page));
BUG_ON(PagePrivate(page)); VM_BUG_ON(!PageSwapBacked(page));
BUG_ON(!PageSwapBacked(page));
error = radix_tree_preload(gfp_mask); error = radix_tree_preload(gfp_mask);
if (!error) { if (!error) {
page_cache_get(page); page_cache_get(page);
...@@ -108,10 +108,9 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask) ...@@ -108,10 +108,9 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
*/ */
void __delete_from_swap_cache(struct page *page) void __delete_from_swap_cache(struct page *page)
{ {
BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageLocked(page));
BUG_ON(!PageSwapCache(page)); VM_BUG_ON(!PageSwapCache(page));
BUG_ON(PageWriteback(page)); VM_BUG_ON(PageWriteback(page));
BUG_ON(PagePrivate(page));
radix_tree_delete(&swapper_space.page_tree, page_private(page)); radix_tree_delete(&swapper_space.page_tree, page_private(page));
set_page_private(page, 0); set_page_private(page, 0);
...@@ -134,8 +133,8 @@ int add_to_swap(struct page * page, gfp_t gfp_mask) ...@@ -134,8 +133,8 @@ int add_to_swap(struct page * page, gfp_t gfp_mask)
swp_entry_t entry; swp_entry_t entry;
int err; int err;
BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageLocked(page));
BUG_ON(!PageUptodate(page)); VM_BUG_ON(!PageUptodate(page));
for (;;) { for (;;) {
entry = get_swap_page(); entry = get_swap_page();
......
...@@ -333,7 +333,7 @@ int can_share_swap_page(struct page *page) ...@@ -333,7 +333,7 @@ int can_share_swap_page(struct page *page)
{ {
int count; int count;
BUG_ON(!PageLocked(page)); VM_BUG_ON(!PageLocked(page));
count = page_mapcount(page); count = page_mapcount(page);
if (count <= 1 && PageSwapCache(page)) if (count <= 1 && PageSwapCache(page))
count += page_swapcount(page); count += page_swapcount(page);
...@@ -350,8 +350,7 @@ static int remove_exclusive_swap_page_count(struct page *page, int count) ...@@ -350,8 +350,7 @@ static int remove_exclusive_swap_page_count(struct page *page, int count)
struct swap_info_struct * p; struct swap_info_struct * p;
swp_entry_t entry; swp_entry_t entry;
BUG_ON(PagePrivate(page)); VM_BUG_ON(!PageLocked(page));
BUG_ON(!PageLocked(page));
if (!PageSwapCache(page)) if (!PageSwapCache(page))
return 0; return 0;
...@@ -432,7 +431,6 @@ void free_swap_and_cache(swp_entry_t entry) ...@@ -432,7 +431,6 @@ void free_swap_and_cache(swp_entry_t entry)
if (page) { if (page) {
int one_user; int one_user;
BUG_ON(PagePrivate(page));
one_user = (page_count(page) == 2); one_user = (page_count(page) == 2);
/* Only cache user (+us), or swap space full? Free it! */ /* Only cache user (+us), or swap space full? Free it! */
/* Also recheck PageSwapCache after page is locked (above) */ /* Also recheck PageSwapCache after page is locked (above) */
...@@ -1209,7 +1207,7 @@ int page_queue_congested(struct page *page) ...@@ -1209,7 +1207,7 @@ int page_queue_congested(struct page *page)
{ {
struct backing_dev_info *bdi; struct backing_dev_info *bdi;
BUG_ON(!PageLocked(page)); /* It pins the swap_info_struct */ VM_BUG_ON(!PageLocked(page)); /* It pins the swap_info_struct */
if (PageSwapCache(page)) { if (PageSwapCache(page)) {
swp_entry_t entry = { .val = page_private(page) }; swp_entry_t entry = { .val = page_private(page) };
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册