Commit 8ecb1a59 authored by Martin Schwidefsky, committed by Christian Borntraeger

s390/mm: use RCU for gmap notifier list and the per-mm gmap list

The gmap notifier list and the gmap list in the mm_struct change rarely.
Use RCU to optimize the readers of these lists.
Reviewed-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Parent 414d3b07
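As background for the hunks below: the patch applies the standard RCU-protected list pattern, with lockless readers inside rcu_read_lock()/rcu_read_unlock(), writers serialized by a spinlock and using the *_rcu list primitives, and synchronize_rcu() before anything a reader might still hold is freed. A minimal, self-contained kernel-style sketch of that pattern (all demo_* names are hypothetical and not part of this commit):

/*
 * Hedged sketch of the RCU list pattern this commit applies to the
 * gmap notifier list and the per-mm gmap list. Only the list/RCU
 * primitives match the patch; the demo_* names are made up.
 */
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
	struct list_head list;
	int payload;
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_lock);	/* serializes writers only */

/* Reader: no spinlock, just an RCU read-side critical section. */
static int demo_sum(void)
{
	struct demo_node *n;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &demo_list, list)
		sum += n->payload;
	rcu_read_unlock();
	return sum;
}

/* Writer: lock against other writers, publish with list_add_rcu(). */
static void demo_add(struct demo_node *n)
{
	spin_lock(&demo_lock);
	list_add_rcu(&n->list, &demo_list);
	spin_unlock(&demo_lock);
}

/* Removal: unlink, then wait out all readers before freeing. */
static void demo_del(struct demo_node *n)
{
	spin_lock(&demo_lock);
	list_del_rcu(&n->list);
	spin_unlock(&demo_lock);
	synchronize_rcu();	/* gmap_free()/unregister do the same */
	kfree(n);
}

gmap_alloc()/gmap_free() and the notifier register/unregister pair below follow this shape; the pgalloc.c hunks are only the mechanical list_lock -> pgtable_lock rename that frees the old name for the new gmap_lock.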
arch/s390/include/asm/gmap.h
@@ -39,6 +39,7 @@ struct gmap {
  */
 struct gmap_notifier {
 	struct list_head list;
+	struct rcu_head rcu;
 	void (*notifier_call)(struct gmap *gmap, unsigned long start,
 			      unsigned long end);
 };
...
arch/s390/include/asm/mmu.h
@@ -8,8 +8,9 @@ typedef struct {
 	cpumask_t cpu_attach_mask;
 	atomic_t attach_count;
 	unsigned int flush_mm;
-	spinlock_t list_lock;
+	spinlock_t pgtable_lock;
 	struct list_head pgtable_list;
+	spinlock_t gmap_lock;
 	struct list_head gmap_list;
 	unsigned long asce;
 	unsigned long asce_limit;
@@ -22,9 +23,11 @@ typedef struct {
 	unsigned int use_skey:1;
 } mm_context_t;
 
 #define INIT_MM_CONTEXT(name)						   \
-	.context.list_lock = __SPIN_LOCK_UNLOCKED(name.context.list_lock), \
+	.context.pgtable_lock =						   \
+		__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock),	   \
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
+	.context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \
 	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
 static inline int tprot(unsigned long addr)
...
arch/s390/include/asm/mmu_context.h
@@ -15,8 +15,9 @@
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
-	spin_lock_init(&mm->context.list_lock);
+	spin_lock_init(&mm->context.pgtable_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
+	spin_lock_init(&mm->context.gmap_lock);
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.attach_count, 0);
...
arch/s390/mm/gmap.c
@@ -70,9 +70,9 @@ struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
 	gmap->asce = atype | _ASCE_TABLE_LENGTH |
 		_ASCE_USER_BITS | __pa(table);
 	gmap->asce_end = limit;
-	down_write(&mm->mmap_sem);
-	list_add(&gmap->list, &mm->context.gmap_list);
-	up_write(&mm->mmap_sem);
+	spin_lock(&mm->context.gmap_lock);
+	list_add_rcu(&gmap->list, &mm->context.gmap_list);
+	spin_unlock(&mm->context.gmap_lock);
 	return gmap;
 
 out_free:
@@ -128,14 +128,16 @@ void gmap_free(struct gmap *gmap)
 	else
 		__tlb_flush_global();
 
+	spin_lock(&gmap->mm->context.gmap_lock);
+	list_del_rcu(&gmap->list);
+	spin_unlock(&gmap->mm->context.gmap_lock);
+	synchronize_rcu();
+
 	/* Free all segment & region tables. */
 	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
 		__free_pages(page, 2);
 	gmap_radix_tree_free(&gmap->guest_to_host);
 	gmap_radix_tree_free(&gmap->host_to_guest);
-	down_write(&gmap->mm->mmap_sem);
-	list_del(&gmap->list);
-	up_write(&gmap->mm->mmap_sem);
 	kfree(gmap);
 }
 EXPORT_SYMBOL_GPL(gmap_free);
@@ -369,11 +371,13 @@ void gmap_unlink(struct mm_struct *mm, unsigned long *table,
 	struct gmap *gmap;
 	int flush;
 
-	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
 		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
 		if (flush)
 			gmap_flush_tlb(gmap);
 	}
+	rcu_read_unlock();
 }
 
 /**
@@ -555,7 +559,7 @@ static DEFINE_SPINLOCK(gmap_notifier_lock);
 void gmap_register_ipte_notifier(struct gmap_notifier *nb)
 {
 	spin_lock(&gmap_notifier_lock);
-	list_add(&nb->list, &gmap_notifier_list);
+	list_add_rcu(&nb->list, &gmap_notifier_list);
 	spin_unlock(&gmap_notifier_lock);
 }
 EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
@@ -567,8 +571,9 @@ EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
 void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
 {
 	spin_lock(&gmap_notifier_lock);
-	list_del_init(&nb->list);
+	list_del_rcu(&nb->list);
 	spin_unlock(&gmap_notifier_lock);
+	synchronize_rcu();
 }
 EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
@@ -662,16 +667,18 @@ void ptep_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
 	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
 	offset = offset * (4096 / sizeof(pte_t));
-	spin_lock(&gmap_notifier_lock);
-	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
+		spin_lock(&gmap->guest_table_lock);
 		table = radix_tree_lookup(&gmap->host_to_guest,
 					  vmaddr >> PMD_SHIFT);
-		if (!table)
-			continue;
-		gaddr = __gmap_segment_gaddr(table) + offset;
-		gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
+		if (table)
+			gaddr = __gmap_segment_gaddr(table) + offset;
+		spin_unlock(&gmap->guest_table_lock);
+		if (table)
+			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
 	}
-	spin_unlock(&gmap_notifier_lock);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(ptep_notify);
...
arch/s390/mm/pgalloc.c
@@ -149,7 +149,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	/* Try to get a fragment of a 4K page as a 2K page table */
 	if (!mm_alloc_pgste(mm)) {
 		table = NULL;
-		spin_lock_bh(&mm->context.list_lock);
+		spin_lock_bh(&mm->context.pgtable_lock);
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
@@ -164,7 +164,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				list_del(&page->lru);
 			}
 		}
-		spin_unlock_bh(&mm->context.list_lock);
+		spin_unlock_bh(&mm->context.pgtable_lock);
 		if (table)
 			return table;
 	}
@@ -187,9 +187,9 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		/* Return the first 2K fragment of the page */
 		atomic_set(&page->_mapcount, 1);
 		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
-		spin_lock_bh(&mm->context.list_lock);
+		spin_lock_bh(&mm->context.pgtable_lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
-		spin_unlock_bh(&mm->context.list_lock);
+		spin_unlock_bh(&mm->context.pgtable_lock);
 	}
 	return table;
 }
@@ -203,13 +203,13 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	if (!mm_alloc_pgste(mm)) {
 		/* Free 2K page table fragment of a 4K page */
 		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
-		spin_lock_bh(&mm->context.list_lock);
+		spin_lock_bh(&mm->context.pgtable_lock);
 		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
 		if (mask & 3)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
-		spin_unlock_bh(&mm->context.list_lock);
+		spin_unlock_bh(&mm->context.pgtable_lock);
 		if (mask != 0)
 			return;
 	}
@@ -235,13 +235,13 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 		return;
 	}
 	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
-	spin_lock_bh(&mm->context.list_lock);
+	spin_lock_bh(&mm->context.pgtable_lock);
 	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
 	if (mask & 3)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
 		list_del(&page->lru);
-	spin_unlock_bh(&mm->context.list_lock);
+	spin_unlock_bh(&mm->context.pgtable_lock);
 	table = (unsigned long *) (__pa(table) | (1U << bit));
 	tlb_remove_table(tlb, table);
 }
...
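Aside on the page_table_free_rcu() hunk above. The patch only renames the lock here, so the following is an interpretation of the surrounding kernel code, not of the patch itself: page->_mapcount doubles as a bitmask in which bits 0-1 track which 2K half of the 4K page is allocated and bits 4-5 mark halves waiting for an RCU grace period. A small, userspace-runnable walk-through of the mask arithmetic, with hypothetical starting values:

/*
 * Hedged walk-through of the fragment accounting visible in
 * page_table_free_rcu(). The starting value 0x03 (both 2K halves
 * allocated) and bit = 1 (freeing the second half) are assumed
 * for illustration only.
 */
#include <assert.h>

int main(void)
{
	unsigned int mapcount = 0x03;	/* both 2K fragments allocated */
	unsigned int bit = 1;		/* freeing the second fragment */

	/* what atomic_xor_bits(&page->_mapcount, 0x11U << bit) leaves */
	unsigned int mask = (mapcount ^= 0x11U << bit);

	assert(mask == 0x21);	/* bit 5: pending RCU free, bit 0: in use */
	assert(mask & 3);	/* a fragment is still in use, so the page */
				/* stays on mm->context.pgtable_list      */
	return 0;
}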