diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 6d5367060a5656a92d5d1b95f28dc3097fa47e78..39faa4ac966075298ea1d6fb1843d9c85cf792a5 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -158,6 +158,9 @@ static inline int page_reset_referenced(unsigned long addr)
  * race against modification of the referenced bit. This function
  * should therefore only be called if it is not mapped in any
  * address space.
+ *
+ * Note that the bit gets set whenever page content is changed. That means
+ * also when the page is modified by DMA or from inside the kernel.
  */
 #define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
 static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
diff --git a/mm/rmap.c b/mm/rmap.c
index 46823fb0e801b599178f8bd548437fa03d87b8ef..cf7e99a87c32865c4e6cf5b6ed4f48f7f05aa9e0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1151,9 +1151,11 @@ void page_remove_rmap(struct page *page)
 	 * containing the swap entry, but page not yet written to swap.
 	 *
 	 * And we can skip it on file pages, so long as the filesystem
-	 * participates in dirty tracking; but need to catch shm and tmpfs
-	 * and ramfs pages which have been modified since creation by read
-	 * fault.
+	 * participates in dirty tracking (note that this is not only an
+	 * optimization but also solves problems caused by dirty flag in
+	 * storage key getting set by a write from inside kernel); but need to
+	 * catch shm and tmpfs and ramfs pages which have been modified since
+	 * creation by read fault.
 	 *
 	 * Note that mapping must be decided above, before decrementing
 	 * mapcount (which luckily provides a barrier): once page is unmapped,
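
Reviewer note (not part of the patch, which changes comments only): for readers without the tree at hand, the comment extended in mm/rmap.c sits directly above the storage-key dirty transfer in page_remove_rmap(), where mapping is the local obtained from page_mapping(page). The sketch below illustrates the logic the comment describes; the exact condition differs between kernel versions, so it is an illustration under those assumptions rather than the code this patch touches.

	/*
	 * Illustrative sketch, not verbatim from any one kernel version:
	 * transfer the s390 storage-key dirty bit to the struct page unless
	 * the page is anon and about to be freed, or belongs to a filesystem
	 * that does its own dirty tracking.
	 */
	if ((!PageAnon(page) || PageSwapCache(page)) &&
	    (!mapping || !mapping_cap_account_dirty(mapping)) &&
	    page_test_and_clear_dirty(page_to_pfn(page), 1))
		set_page_dirty(page);

Here page_test_and_clear_dirty() is the s390 helper documented by the first hunk: it reports and clears the change bit in the page's storage key, which, as the new comment stresses, is set by any modification of the page, including DMA and writes from inside the kernel, not only userspace stores through a mapping.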