Commit 70d64cea authored by Paul Mackerras

powerpc: Rename files to have consistent _32/_64 suffixes

This doesn't change any code, just renames things so we consistently
have foo_32.c and foo_64.c where we have separate 32- and 64-bit
versions.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent a432403a
@@ -112,7 +112,7 @@ CFLAGS += $(cpu-as-y)
 # Default to the common case.
 KBUILD_DEFCONFIG := common_defconfig
-head-y := arch/powerpc/kernel/head.o
+head-y := arch/powerpc/kernel/head_32.o
 head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o
 head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
 head-$(CONFIG_4xx) := arch/powerpc/kernel/head_4xx.o
@@ -10,7 +10,7 @@ CFLAGS_prom_init.o += -fPIC
 CFLAGS_btext.o += -fPIC
 endif
-extra-$(CONFIG_PPC_STD_MMU) := head.o
+extra-$(CONFIG_PPC_STD_MMU) := head_32.o
 extra-$(CONFIG_PPC64) := head_64.o
 extra-$(CONFIG_40x) := head_4xx.o
 extra-$(CONFIG_44x) := head_44x.o
@@ -21,7 +21,7 @@ extra-$(CONFIG_PPC_FPU) += fpu.o
 extra-y += vmlinux.lds
 obj-y := traps.o prom.o semaphore.o
-obj-$(CONFIG_PPC32) += setup.o process.o
+obj-$(CONFIG_PPC32) += setup_32.o process.o
 obj-$(CONFIG_PPC64) += idle_power4.o
 ifeq ($(CONFIG_PPC32),y)
 obj-$(CONFIG_PPC_OF) += prom_init.o of_device.o
@@ -3,7 +3,7 @@
 #
 obj-y := strcase.o string.o
-obj-$(CONFIG_PPC32) += div64.o copy32.o checksum.o
-obj-$(CONFIG_PPC64) += copypage.o copyuser.o memcpy.o usercopy.o \
-			   sstep.o checksum64.o
+obj-$(CONFIG_PPC32) += div64.o copy_32.o checksum_32.o
+obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o memcpy_64.o \
+			   usercopy_64.o sstep.o checksum_64.o mem_64.o
 obj-$(CONFIG_PPC_ISERIES) += e2a.o
/*
 * String handling functions for PowerPC.
 *
 * Copyright (C) 1996 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <asm/processor.h>
#include <asm/errno.h>
#include <asm/ppc_asm.h>
_GLOBAL(memset)
	neg	r0,r3
	rlwimi	r4,r4,8,16,23
	andi.	r0,r0,7			/* # bytes to be 8-byte aligned */
	rlwimi	r4,r4,16,0,15
	cmplw	cr1,r5,r0		/* do we get that far? */
	rldimi	r4,r4,32,0
	mtcrf	1,r0
	mr	r6,r3
	blt	cr1,8f
	beq+	3f			/* if already 8-byte aligned */
	subf	r5,r0,r5
	bf	31,1f
	stb	r4,0(r6)
	addi	r6,r6,1
1:	bf	30,2f
	sth	r4,0(r6)
	addi	r6,r6,2
2:	bf	29,3f
	stw	r4,0(r6)
	addi	r6,r6,4
3:	srdi.	r0,r5,6
	clrldi	r5,r5,58
	mtctr	r0
	beq	5f
4:	std	r4,0(r6)
	std	r4,8(r6)
	std	r4,16(r6)
	std	r4,24(r6)
	std	r4,32(r6)
	std	r4,40(r6)
	std	r4,48(r6)
	std	r4,56(r6)
	addi	r6,r6,64
	bdnz	4b
5:	srwi.	r0,r5,3
	clrlwi	r5,r5,29
	mtcrf	1,r0
	beq	8f
	bf	29,6f
	std	r4,0(r6)
	std	r4,8(r6)
	std	r4,16(r6)
	std	r4,24(r6)
	addi	r6,r6,32
6:	bf	30,7f
	std	r4,0(r6)
	std	r4,8(r6)
	addi	r6,r6,16
7:	bf	31,8f
	std	r4,0(r6)
	addi	r6,r6,8
8:	cmpwi	r5,0
	mtcrf	1,r5
	beqlr+
	bf	29,9f
	stw	r4,0(r6)
	addi	r6,r6,4
9:	bf	30,10f
	sth	r4,0(r6)
	addi	r6,r6,2
10:	bflr	31
	stb	r4,0(r6)
	blr

_GLOBAL(memmove)
	cmplw	0,r3,r4
	bgt	.backwards_memcpy
	b	.memcpy

_GLOBAL(backwards_memcpy)
	rlwinm.	r7,r5,32-3,3,31		/* r0 = r5 >> 3 */
	add	r6,r3,r5
	add	r4,r4,r5
	beq	2f
	andi.	r0,r6,3
	mtctr	r7
	bne	5f
1:	lwz	r7,-4(r4)
	lwzu	r8,-8(r4)
	stw	r7,-4(r6)
	stwu	r8,-8(r6)
	bdnz	1b
	andi.	r5,r5,7
2:	cmplwi	0,r5,4
	blt	3f
	lwzu	r0,-4(r4)
	subi	r5,r5,4
	stwu	r0,-4(r6)
3:	cmpwi	0,r5,0
	beqlr
	mtctr	r5
4:	lbzu	r0,-1(r4)
	stbu	r0,-1(r6)
	bdnz	4b
	blr
5:	mtctr	r0
6:	lbzu	r7,-1(r4)
	stbu	r7,-1(r6)
	bdnz	6b
	subf	r5,r0,r5
	rlwinm.	r7,r5,32-3,3,31
	beq	2b
	mtctr	r7
	b	1b
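
For readers who do not read PPC64 assembly every day, the memset routine above follows a familiar pattern: replicate the fill byte across a 64-bit register (the rlwimi/rldimi sequence), store single bytes, halfwords and words until the destination is 8-byte aligned, issue 64-byte blocks of doubleword stores, then mop up the tail. The C sketch below mirrors that strategy for illustration only; the helper name memset_sketch is hypothetical, and the kernel builds the assembly version above, not this code.

#include <stddef.h>
#include <stdint.h>

/* Illustrative only: same strategy as the PPC64 memset above
 * (align to 8 bytes, 64-byte unrolled middle loop, small tail). */
static void *memset_sketch(void *dst, int c, size_t n)
{
	unsigned char *p = dst;
	uint64_t pat = (unsigned char)c;

	pat |= pat << 8;	/* replicate the byte across 64 bits, */
	pat |= pat << 16;	/* as the rlwimi/rldimi sequence does  */
	pat |= pat << 32;

	/* store 1/2/4 bytes until p is 8-byte aligned (or n runs out) */
	while (n && ((uintptr_t)p & 7)) {
		*p++ = (unsigned char)c;
		n--;
	}

	/* main loop: eight doubleword stores per iteration (64 bytes) */
	while (n >= 64) {
		uint64_t *q = (uint64_t *)p;
		q[0] = q[1] = q[2] = q[3] = pat;
		q[4] = q[5] = q[6] = q[7] = pat;
		p += 64;
		n -= 64;
	}

	/* tail: remaining doublewords, then bytes */
	while (n >= 8) {
		*(uint64_t *)p = pat;
		p += 8;
		n -= 8;
	}
	while (n--)
		*p++ = (unsigned char)c;

	return dst;
}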
@@ -3,10 +3,10 @@
 #
 obj-y := fault.o mem.o lmb.o
-obj-$(CONFIG_PPC32) += init.o pgtable.o mmu_context.o \
-			   tlb.o
-obj-$(CONFIG_PPC64) += init64.o pgtable64.o mmu_context64.o
-obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu.o hash_32.o
+obj-$(CONFIG_PPC32) += init_32.o pgtable_32.o mmu_context_32.o \
+			   tlb_32.o
+obj-$(CONFIG_PPC64) += init_64.o pgtable_64.o mmu_context_64.o
+obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o hash_low_32.o
 obj-$(CONFIG_40x) += 4xx_mmu.o
 obj-$(CONFIG_44x) += 44x_mmu.o
 obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o
/*
 * PowerPC version
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 * and Cort Dougan (PReP) (cort@cs.nmt.edu)
 * Copyright (C) 1996 Paul Mackerras
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 * Dave Engebretsen <engebret@us.ibm.com>
 * Rework for PPC64 port.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/lmb.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/mmu.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/ppcdebug.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>
#include <asm/vdso.h>
#include <asm/imalloc.h>
/*
 * This is called by /dev/mem to know if a given address has to
 * be mapped non-cacheable or not
 */
int page_is_ram(unsigned long pfn)
{
	int i;
	unsigned long paddr = (pfn << PAGE_SHIFT);

	for (i=0; i < lmb.memory.cnt; i++) {
		unsigned long base;

		base = lmb.memory.region[i].base;

		if ((paddr >= base) &&
		    (paddr < (base + lmb.memory.region[i].size))) {
			return 1;
		}
	}

	return 0;
}
EXPORT_SYMBOL(page_is_ram);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long addr,
			      unsigned long size, pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(file, addr, size, vma_prot);

	if (!page_is_ram(addr >> PAGE_SHIFT))
		vma_prot = __pgprot(pgprot_val(vma_prot)
				    | _PAGE_GUARDED | _PAGE_NO_CACHE);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);

void show_mem(void)
{
	unsigned long total = 0, reserved = 0;
	unsigned long shared = 0, cached = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; i++) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%ld pages of RAM\n", total);
	printk("%ld reserved pages\n", reserved);
	printk("%ld pages shared\n", shared);
	printk("%ld pages swap cached\n", cached);
}

/*
 * This is called when a page has been modified by the kernel.
 * It just marks the page as not i-cache clean. We do the i-cache
 * flush later when the page is given to a user process, if necessary.
 */
void flush_dcache_page(struct page *page)
{
	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &page->flags))
		clear_bit(PG_arch_1, &page->flags);
}
EXPORT_SYMBOL(flush_dcache_page);

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
	clear_page(page);

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;
	/*
	 * We shouldn't have to do this, but some versions of glibc
	 * require it (ld.so assumes zero filled pages are icache clean)
	 * - Anton
	 */

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
		    struct page *pg)
{
	copy_page(vto, vfrom);

	/*
	 * We should be able to use the following optimisation, however
	 * there are two problems.
	 * Firstly a bug in some versions of binutils meant PLT sections
	 * were not marked executable.
	 * Secondly the first word in the GOT section is blrl, used
	 * to establish the GOT address. Until recently the GOT was
	 * not marked executable.
	 * - Anton
	 */
#if 0
	if (!vma->vm_file && ((vma->vm_flags & VM_EXEC) == 0))
		return;
#endif

	if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		return;

	/* avoid an atomic op if possible */
	if (test_bit(PG_arch_1, &pg->flags))
		clear_bit(PG_arch_1, &pg->flags);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	unsigned long maddr;

	maddr = (unsigned long)page_address(page) + (addr & ~PAGE_MASK);
	flush_icache_range(maddr, maddr + len);
}
EXPORT_SYMBOL(flush_icache_user_range);

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 *
 * This must always be called with the mm->page_table_lock held
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
		      pte_t pte)
{
	unsigned long vsid;
	void *pgdir;
	pte_t *ptep;
	int local = 0;
	cpumask_t tmp;
	unsigned long flags;

	/* handle i-cache coherency */
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE) &&
	    !cpu_has_feature(CPU_FTR_NOEXECUTE)) {
		unsigned long pfn = pte_pfn(pte);
		if (pfn_valid(pfn)) {
			struct page *page = pfn_to_page(pfn);
			if (!PageReserved(page)
			    && !test_bit(PG_arch_1, &page->flags)) {
				__flush_dcache_icache(page_address(page));
				set_bit(PG_arch_1, &page->flags);
			}
		}
	}

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(pte))
		return;

	pgdir = vma->vm_mm->pgd;
	if (pgdir == NULL)
		return;

	ptep = find_linux_pte(pgdir, ea);
	if (!ptep)
		return;

	vsid = get_vsid(vma->vm_mm->context.id, ea);

	local_irq_save(flags);
	tmp = cpumask_of_cpu(smp_processor_id());
	if (cpus_equal(vma->vm_mm->cpu_vm_mask, tmp))
		local = 1;

	__hash_page(ea, pte_val(pte) & (_PAGE_USER|_PAGE_RW), vsid, ptep,
		    0x300, local);
	local_irq_restore(flags);
}