/*
 *  linux/arch/arm/mm/copypage-xscale.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This handles the mini data cache, as found on SA11x0 and XScale
 * processors.  When we copy a user page, we map it in such a way
 * that accesses to this page will not touch the main data cache, but
 * will be cached in the mini data cache.  This prevents us thrashing
 * the main data cache on page faults.
 */
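
/*
 * Overview (note added in editing, paraphrasing the code below): the
 * source page is temporarily mapped at the dedicated COPYPAGE_MINICACHE
 * virtual address with minicache_pgprot, so the copy reads it through
 * the mini data cache; the destination is an ordinary kmap_atomic()
 * mapping whose lines are cleaned and invalidated as they are written.
 * A raw spinlock serialises use of the single minicache window.
 */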
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

#include "mm.h"

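/*
 * Note added in editing: L_PTE_PRESENT | L_PTE_YOUNG gives a valid,
 * accessed kernel PTE; L_PTE_MT_MINICACHE selects the memory type that
 * steers allocations into the mini data cache rather than the main
 * Dcache, as described in the header comment above.
 */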
#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
				  L_PTE_MT_MINICACHE)

static DEFINE_RAW_SPINLOCK(minicache_lock);

/*
 * XScale mini-dcache optimised copy_user_highpage
 *
 * We flush the destination cache lines just before we write the data into the
 * corresponding address.  Since the Dcache is read-allocate, this removes the
 * Dcache aliasing issue.  The writes will be forwarded to the write buffer,
 * and merged as appropriate.
 */
static void __naked
mc_copy_user_page(void *from, void *to)
{
	/*
	 * Strangely enough, best performance is achieved
	 * when prefetching destination as well.  (NP)
	 */
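	/*
	 * Note added in editing, on the loop below: lr counts
	 * PAGE_SIZE/64 - 1 iterations.  Each pass copies 64 bytes as two
	 * 32-byte halves; after each half, the just-written destination
	 * line (ip) is cleaned (c7, c10, 1) and invalidated (c7, c6, 1)
	 * so destination data never lingers in the main Dcache.  The
	 * final pass branches to 2: rather than 1:, skipping prefetches
	 * beyond the end of the page.
	 */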
	asm volatile(
	"stmfd	sp!, {r4, r5, lr}		\n\
	mov	lr, %2				\n\
	pld	[r0, #0]			\n\
	pld	[r0, #32]			\n\
	pld	[r1, #0]			\n\
	pld	[r1, #32]			\n\
1:	pld	[r0, #64]			\n\
	pld	[r0, #96]			\n\
	pld	[r1, #64]			\n\
	pld	[r1, #96]			\n\
2:	ldrd	r2, [r0], #8			\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	ldrd	r2, [r0], #8			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	ldrd	r4, [r0], #8			\n\
	mov	ip, r1				\n\
	strd	r2, [r1], #8			\n\
	ldrd	r2, [r0], #8			\n\
	strd	r4, [r1], #8			\n\
	ldrd	r4, [r0], #8			\n\
	strd	r2, [r1], #8			\n\
	strd	r4, [r1], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	lr, lr, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bgt	1b				\n\
	beq	2b				\n\
	ldmfd	sp!, {r4, r5, pc}		"
	:
	: "r" (from), "r" (to), "I" (PAGE_SIZE / 64 - 1));
}
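
/*
 * Illustrative sketch, not part of the original source: ignoring the
 * prefetch and cache-maintenance instructions, the assembly above is
 * roughly equivalent to the following C, copying the page in 64-byte
 * chunks:
 *
 *	u64 *s = from, *d = to;
 *	unsigned int n, i;
 *
 *	for (n = 0; n < PAGE_SIZE / 64; n++)
 *		for (i = 0; i < 8; i++)
 *			*d++ = *s++;
 */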

void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to);

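	/*
	 * Note added in editing: if the source page may still have dirty
	 * lines in the main Dcache (PG_dcache_clean unset), write them
	 * back first, so that reading the page through the alternate
	 * minicache mapping below observes up-to-date data.
	 */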
	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
		__flush_dcache_page(page_mapping(from), from);

	raw_spin_lock(&minicache_lock);

	set_top_pte(COPYPAGE_MINICACHE, mk_pte(from, minicache_pgprot));

	mc_copy_user_page((void *)COPYPAGE_MINICACHE, kto);

	raw_spin_unlock(&minicache_lock);

	kunmap_atomic(kto);
}

/*
 * XScale optimised clear_user_page
 */
void
xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
{
	void *ptr, *kaddr = kmap_atomic(page);
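	/*
	 * Note added in editing, on the loop below: r1 counts
	 * PAGE_SIZE/32 iterations; each pass zeroes one 32-byte line
	 * with four strd stores, then cleans and invalidates that line
	 * so the zeroed page does not displace useful data from the
	 * Dcache.
	 */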
	asm volatile(
	"mov	r1, %2				\n\
	mov	r2, #0				\n\
	mov	r3, #0				\n\
1:	mov	ip, %0				\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	strd	r2, [%0], #8			\n\
	mcr	p15, 0, ip, c7, c10, 1		@ clean D line\n\
	subs	r1, r1, #1			\n\
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate D line\n\
	bne	1b"
	: "=r" (ptr)
	: "0" (kaddr), "I" (PAGE_SIZE / 32)
	: "r1", "r2", "r3", "ip");
	kunmap_atomic(kaddr);
}

struct cpu_user_fns xscale_mc_user_fns __initdata = {
	.cpu_clear_user_highpage = xscale_mc_clear_user_highpage,
	.cpu_copy_user_highpage	= xscale_mc_copy_user_highpage,
};
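
/*
 * Note added in editing: nothing calls these functions directly.  This
 * ops table is referenced from the processor info tables and copied
 * into the generic per-CPU user-page ops during boot (hence it can be
 * __initdata), after which the copy_user_highpage() and
 * clear_user_highpage() paths dispatch through these pointers.
 */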