Commit e2b8d7af authored by Martin Schwidefsky, committed by Martin Schwidefsky

[S390] add support for nonquiescing sske

Improve performance of the sske operation by using the nonquiescing
variant if the affected page has no mappings established. On machines
with no support for the new sske variant the mask bit will be ignored.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Parent 92f842ea
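
A note on the encoding used in the first hunk below (an editorial sketch, not part of the commit): SSKE is an RRF-format instruction with opcode 0xb22b, and the third operand of the ".insn rrf" directive, 8, sets the nonquiescing (NQ) bit in the instruction's mask field, so the storage-key update is done without quiescing all CPUs. The following restatement of the selection logic adds explanatory comments; the helper name is hypothetical and a gcc-style s390 inline-asm environment is assumed:

    /* Hypothetical sketch mirroring the commit's page_set_storage_key() */
    static inline void sske_set_key(unsigned long addr, unsigned int skey, int mapped)
    {
    	if (!mapped)
    		/* SSKE with the NQ mask bit (8): the page has no mappings
    		 * established, so skipping the system-wide quiescing is safe
    		 * and cheaper.  Machines without support for the new variant
    		 * ignore the mask bit, as the commit message notes. */
    		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
    			     : : "d" (skey), "a" (addr));
    	else
    		/* Plain quiescing SSKE for pages that may already be mapped. */
    		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
    }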
@@ -108,9 +108,13 @@ typedef pte_t *pgtable_t;
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
 static inline void
-page_set_storage_key(unsigned long addr, unsigned int skey)
+page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
 {
-	asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
+	if (!mapped)
+		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
+			     : : "d" (skey), "a" (addr));
+	else
+		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
 }
 
 static inline unsigned int
......
@@ -590,7 +590,7 @@ static inline void rcp_unlock(pte_t *ptep)
 }
 
 /* forward declaration for SetPageUptodate in page-flags.h*/
-static inline void page_clear_dirty(struct page *page);
+static inline void page_clear_dirty(struct page *page, int mapped);
 #include <linux/page-flags.h>
 
 static inline void ptep_rcp_copy(pte_t *ptep)
@@ -800,7 +800,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 	}
 	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
 	if (skey & _PAGE_CHANGED)
-		page_clear_dirty(page);
+		page_clear_dirty(page, 1);
 	rcp_unlock(ptep);
 	return dirty;
 }
@@ -975,9 +975,9 @@ static inline int page_test_dirty(struct page *page)
 }
 
 #define __HAVE_ARCH_PAGE_CLEAR_DIRTY
-static inline void page_clear_dirty(struct page *page)
+static inline void page_clear_dirty(struct page *page, int mapped)
 {
-	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY);
+	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
 }
 
 /*
......
@@ -208,7 +208,8 @@ static noinline __init void init_kernel_storage_key(void)
 	end_pfn = PFN_UP(__pa(&_end));
 
 	for (init_pfn = 0 ; init_pfn < end_pfn; init_pfn++)
-		page_set_storage_key(init_pfn << PAGE_SHIFT, PAGE_DEFAULT_KEY);
+		page_set_storage_key(init_pfn << PAGE_SHIFT,
+				     PAGE_DEFAULT_KEY, 0);
 }
 
 static __initdata struct sysinfo_3_2_2 vmms __aligned(PAGE_SIZE);
......
@@ -627,7 +627,8 @@ setup_memory(void)
 		add_active_range(0, start_chunk, end_chunk);
 		pfn = max(start_chunk, start_pfn);
 		for (; pfn < end_chunk; pfn++)
-			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
+			page_set_storage_key(PFN_PHYS(pfn),
+					     PAGE_DEFAULT_KEY, 0);
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
......
@@ -108,7 +108,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #endif
 
 #ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
-#define page_clear_dirty(page)		do { } while (0)
+#define page_clear_dirty(page, mapped)	do { } while (0)
 #endif
 
 #ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
......
@@ -310,7 +310,7 @@ static inline void SetPageUptodate(struct page *page)
 {
 #ifdef CONFIG_S390
 	if (!test_and_set_bit(PG_uptodate, &page->flags))
-		page_clear_dirty(page);
+		page_clear_dirty(page, 0);
 #else
 	/*
 	 * Memory barrier must be issued before setting the PG_uptodate bit,
......
@@ -745,7 +745,7 @@ int page_mkclean(struct page *page)
 	if (mapping) {
 		ret = page_mkclean_file(mapping, page);
 		if (page_test_dirty(page)) {
-			page_clear_dirty(page);
+			page_clear_dirty(page, 1);
 			ret = 1;
 		}
 	}
@@ -942,7 +942,7 @@ void page_remove_rmap(struct page *page)
 	 * containing the swap entry, but page not yet written to swap.
 	 */
 	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
-		page_clear_dirty(page);
+		page_clear_dirty(page, 1);
 		set_page_dirty(page);
 	}
 	/*
......