Commit 14cf11af, authored by Paul Mackerras

powerpc: Merge enough to start building in arch/powerpc.

This creates the directory structure under arch/powerpc and a bunch
of Kconfig files.  It does a first-cut merge of arch/powerpc/mm,
arch/powerpc/lib and arch/powerpc/platforms/powermac.  This is enough
to build a 32-bit powermac kernel with ARCH=powerpc.

For now we are getting some unmerged files from arch/ppc/kernel and
arch/ppc/syslib, or arch/ppc64/kernel.  This makes some minor changes
to files in those directories and files outside arch/powerpc.

The boot directory is still not merged.  That's going to be interesting.
Signed-off-by: Paul Mackerras <paulus@samba.org>
Parent: e5baa396
menu "Kernel hacking"
source "lib/Kconfig.debug"
config KGDB
bool "Include kgdb kernel debugger"
depends on DEBUG_KERNEL && (BROKEN || PPC_GEN550 || 4xx)
select DEBUG_INFO
help
Include in-kernel hooks for kgdb, the Linux kernel source level
debugger. See <http://kgdb.sourceforge.net/> for more information.
Unless you are intending to debug the kernel, say N here.
choice
prompt "Serial Port"
depends on KGDB
default KGDB_TTYS1
config KGDB_TTYS0
bool "ttyS0"
config KGDB_TTYS1
bool "ttyS1"
config KGDB_TTYS2
bool "ttyS2"
config KGDB_TTYS3
bool "ttyS3"
endchoice
config KGDB_CONSOLE
bool "Enable serial console through kgdb port"
depends on KGDB && (8xx || CPM2)
help
If you enable this, all serial console messages will be sent
over the gdb stub.
If unsure, say N.
config XMON
bool "Include xmon kernel debugger"
depends on DEBUG_KERNEL
help
Include in-kernel hooks for the xmon kernel monitor/debugger.
Unless you are intending to debug the kernel, say N here.
config BDI_SWITCH
bool "Include BDI-2000 user context switcher"
depends on DEBUG_KERNEL
help
Include in-kernel support for the Abatron BDI2000 debugger.
Unless you are intending to debug the kernel with one of these
machines, say N here.
config BOOTX_TEXT
bool "Support for early boot text console (BootX or OpenFirmware only)"
depends on PPC_OF
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires either BootX or Open Firmware.
config SERIAL_TEXT_DEBUG
bool "Support for early boot texts over serial port"
depends on 4xx || LOPEC || MV64X60 || PPLUS || PRPMC800 || \
PPC_GEN550 || PPC_MPC52xx
config PPC_OCP
bool
depends on IBM_OCP || XILINX_OCP
default y
endmenu
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies. Remember to have actions
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture.
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1994 by Linus Torvalds
# Changes for PPC by Gary Thomas
# Rewritten by Cort Dougan and Paul Mackerras
#
# This must match PAGE_OFFSET in include/asm-powerpc/page.h.
KERNELLOAD := $(CONFIG_KERNEL_START)
HAS_BIARCH := $(call cc-option-yn, -m32)
ifeq ($(CONFIG_PPC64),y)
SZ := 64
# Set default 32 bits cross compilers for vdso and boot wrapper
CROSS32_COMPILE ?=
CROSS32CC := $(CROSS32_COMPILE)gcc
CROSS32AS := $(CROSS32_COMPILE)as
CROSS32LD := $(CROSS32_COMPILE)ld
CROSS32OBJCOPY := $(CROSS32_COMPILE)objcopy
ifeq ($(HAS_BIARCH),y)
ifeq ($(CROSS32_COMPILE),)
CROSS32CC := $(CC) -m32
CROSS32AS := $(AS) -a32
CROSS32LD := $(LD) -m elf32ppc
CROSS32OBJCOPY := $(OBJCOPY)
endif
endif
export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY
new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi)
ifeq ($(new_nm),y)
NM := $(NM) --synthetic
endif
else
SZ := 32
endif
ifeq ($(HAS_BIARCH),y)
override AS += -a$(SZ)
override LD += -m elf$(SZ)ppc
override CC += -m$(SZ)
endif
LDFLAGS_vmlinux := -Ttext $(KERNELLOAD) -Bstatic -e $(KERNELLOAD)
# The -Iarch/$(ARCH)/include is temporary while we are merging
CPPFLAGS += -Iarch/$(ARCH) -Iarch/$(ARCH)/include
AFLAGS += -Iarch/$(ARCH)
CFLAGS += -Iarch/$(ARCH) -msoft-float -pipe
ifeq ($(CONFIG_PPC64),y)
CFLAGS += -mminimal-toc -mtraceback=none -mcall-aixdesc
else
CFLAGS += -ffixed-r2 -mmultiple
endif
CPP = $(CC) -E $(CFLAGS)
# Temporary hack until we have migrated to asm-powerpc
LINUXINCLUDE += -Iarch/$(ARCH)/include
CHECKFLAGS += -m$(SZ) -D__powerpc__ -D__powerpc$(SZ)__
ifeq ($(CONFIG_PPC64),y)
GCC_VERSION := $(call cc-version)
GCC_BROKEN_VEC := $(shell if [ $(GCC_VERSION) -lt 0400 ] ; then echo "y"; fi)
ifeq ($(CONFIG_POWER4_ONLY),y)
ifeq ($(CONFIG_ALTIVEC),y)
ifeq ($(GCC_BROKEN_VEC),y)
CFLAGS += $(call cc-option,-mcpu=970)
else
CFLAGS += $(call cc-option,-mcpu=power4)
endif
else
CFLAGS += $(call cc-option,-mcpu=power4)
endif
else
CFLAGS += $(call cc-option,-mtune=power4)
endif
endif
# Enable unit-at-a-time mode when possible. It shrinks the
# kernel considerably.
CFLAGS += $(call cc-option,-funit-at-a-time)
ifndef CONFIG_FSL_BOOKE
CFLAGS += -mstring
endif
cpu-as-$(CONFIG_PPC64BRIDGE) += -Wa,-mppc64bridge
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_6xx) += -Wa,-maltivec
cpu-as-$(CONFIG_POWER4) += -Wa,-maltivec
cpu-as-$(CONFIG_E500) += -Wa,-me500
cpu-as-$(CONFIG_E200) += -Wa,-me200
AFLAGS += $(cpu-as-y)
CFLAGS += $(cpu-as-y)
# Default to the common case.
KBUILD_DEFCONFIG := common_defconfig
head-y := arch/powerpc/kernel/head.o
head-$(CONFIG_PPC64) := arch/powerpc/kernel/head_64.o
head-$(CONFIG_8xx) := arch/powerpc/kernel/head_8xx.o
head-$(CONFIG_4xx) := arch/powerpc/kernel/head_4xx.o
head-$(CONFIG_44x) := arch/powerpc/kernel/head_44x.o
head-$(CONFIG_FSL_BOOKE) := arch/powerpc/kernel/head_fsl_booke.o
ifeq ($(CONFIG_PPC32),y)
head-$(CONFIG_6xx) += arch/powerpc/kernel/idle_6xx.o
head-$(CONFIG_POWER4) += arch/powerpc/kernel/idle_power4.o
head-$(CONFIG_PPC_FPU) += arch/powerpc/kernel/fpu.o
endif
core-y += arch/powerpc/kernel/ \
arch/powerpc/mm/ \
arch/powerpc/lib/ \
arch/powerpc/sysdev/
core-$(CONFIG_PPC32) += arch/ppc/kernel/ \
arch/ppc/syslib/
core-$(CONFIG_PPC64) += arch/ppc64/kernel/
core-$(CONFIG_PPC_PMAC) += arch/powerpc/platforms/powermac/
core-$(CONFIG_4xx) += arch/ppc/platforms/4xx/
core-$(CONFIG_83xx) += arch/ppc/platforms/83xx/
core-$(CONFIG_85xx) += arch/ppc/platforms/85xx/
core-$(CONFIG_MATH_EMULATION) += arch/ppc/math-emu/
core-$(CONFIG_XMON) += arch/powerpc/xmon/
core-$(CONFIG_APUS) += arch/ppc/amiga/
drivers-$(CONFIG_8xx) += arch/ppc/8xx_io/
drivers-$(CONFIG_4xx) += arch/ppc/4xx_io/
drivers-$(CONFIG_CPM2) += arch/ppc/8260_io/
drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
.PHONY: $(BOOT_TARGETS)
all: uImage zImage
CPPFLAGS_vmlinux.lds := -Upowerpc
# All the instructions talk about "make bzImage".
bzImage: zImage
boot := arch/$(ARCH)/boot
$(BOOT_TARGETS): vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
uImage: vmlinux
$(Q)$(MAKE) $(build)=$(boot)/images $(boot)/images/$@
define archhelp
@echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/images/zImage.*)'
@echo ' uImage - Create a bootable image for U-Boot / PPCBoot'
@echo ' install - Install kernel using'
@echo ' (your) ~/bin/installkernel or'
@echo ' (distribution) /sbin/installkernel or'
@echo ' install to $$(INSTALL_PATH) and run lilo'
@echo ' *_defconfig - Select default config from arch/$(ARCH)/ppc/configs'
endef
archclean:
$(Q)$(MAKE) $(clean)=arch/ppc/boot
# Temporary hack until we have migrated to asm-powerpc
$(Q)rm -rf arch/$(ARCH)/include
archprepare: checkbin
# Temporary hack until we have migrated to asm-powerpc
ifeq ($(CONFIG_PPC64),y)
include/asm: arch/$(ARCH)/include/asm
arch/$(ARCH)/include/asm:
$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
$(Q)ln -fsn $(srctree)/include/asm-ppc64 arch/$(ARCH)/include/asm
else
include/asm: arch/$(ARCH)/include/asm
arch/$(ARCH)/include/asm:
$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
$(Q)ln -fsn $(srctree)/include/asm-ppc arch/$(ARCH)/include/asm
endif
# Use the file '.tmp_gas_check' for binutils tests, as gas won't output
# to stdout and these checks are run even on install targets.
TOUT := .tmp_gas_check
# Ensure this is binutils 2.12.1 (or 2.12.90.0.7) or later for altivec
# instructions.
# gcc-3.4 and binutils-2.14 are a fatal combination.
GCC_VERSION := $(call cc-version)
checkbin:
@if test "$(GCC_VERSION)" = "0304" ; then \
if ! /bin/echo mftb 5 | $(AS) -v -mppc -many -o $(TOUT) >/dev/null 2>&1 ; then \
echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build '; \
echo 'correctly with gcc-3.4 and your version of binutils.'; \
echo '*** Please upgrade your binutils or downgrade your gcc'; \
false; \
fi ; \
fi
@if ! /bin/echo dssall | $(AS) -many -o $(TOUT) >/dev/null 2>&1 ; then \
echo -n '*** ${VERSION}.${PATCHLEVEL} kernels no longer build ' ; \
echo 'correctly with old versions of binutils.' ; \
echo '*** Please upgrade your binutils to 2.12.1 or newer' ; \
false ; \
fi
CLEAN_FILES += $(TOUT)
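# Illustrative usage note (not part of the original Makefile): with a config
# already selected, the 32-bit powermac build described in the commit message
# would be driven by something like
#   make ARCH=powerpc CROSS_COMPILE=<your-toolchain-prefix-> zImage
# where the cross-compiler prefix is an assumption about your environment.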
#
# Makefile for the linux kernel.
#
extra-$(CONFIG_PPC_STD_MMU) := head.o
extra-$(CONFIG_PPC64) := head_64.o
extra-$(CONFIG_40x) := head_4xx.o
extra-$(CONFIG_44x) := head_44x.o
extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o
extra-$(CONFIG_8xx) := head_8xx.o
extra-$(CONFIG_6xx) += idle_6xx.o
extra-$(CONFIG_POWER4) += idle_power4.o
extra-$(CONFIG_PPC_FPU) += fpu.o
extra-y += vmlinux.lds
obj-y := semaphore.o traps.o process.o
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/suspend.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/hardirq.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/iSeries/HvLpEvent.h>
#include <asm/rtas.h>
#include <asm/cache.h>
#include <asm/systemcfg.h>
#include <asm/compat.h>
#endif
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
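/*
 * A hedged sketch of what the markers above produce (the value shown is
 * purely illustrative): compiling this file with -S yields lines such as
 *
 *	->THREAD_SIZE 8192 THREAD_SIZE
 *
 * in the assembly output, and the Kbuild machinery then rewrites each
 * "->SYM VALUE ..." marker into a "#define SYM VALUE" line in the
 * generated offsets header that the assembly sources include.
 */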
int main(void)
{
/* thread struct on stack */
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
#ifdef CONFIG_PPC32
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
#endif
#ifdef CONFIG_PPC64
DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));
DEFINE(THREAD_SHIFT, THREAD_SHIFT);
#endif
DEFINE(THREAD_SIZE, THREAD_SIZE);
/* task_struct->thread */
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(THREAD_INFO, offsetof(struct task_struct, thread_info));
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(PTRACE, offsetof(struct task_struct, ptrace));
DEFINE(KSP, offsetof(struct thread_struct, ksp));
DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
DEFINE(THREAD_DBCR0, offsetof(struct thread_struct, dbcr0));
DEFINE(PT_PTRACED, PT_PTRACED);
#endif
#ifdef CONFIG_PPC64
DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));
#endif
#ifdef CONFIG_ALTIVEC
DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
DEFINE(THREAD_EVR0, offsetof(struct thread_struct, evr[0]));
DEFINE(THREAD_ACC, offsetof(struct thread_struct, acc));
DEFINE(THREAD_SPEFSCR, offsetof(struct thread_struct, spefscr));
DEFINE(THREAD_USED_SPE, offsetof(struct thread_struct, used_spe));
#endif /* CONFIG_SPE */
/* Interrupt register frame */
DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);
#ifndef CONFIG_PPC64
DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
#else
DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));
/* 288 = # of volatile regs, int & fp, for leaf routines */
/* which do not stack a frame. See the PPC64 ABI. */
DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
#endif
/* in fact we only use gpr0 - gpr9 and gpr20 - gpr23 */
DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
DEFINE(GPR14, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[14]));
DEFINE(GPR15, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[15]));
DEFINE(GPR16, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[16]));
DEFINE(GPR17, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[17]));
DEFINE(GPR18, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[18]));
DEFINE(GPR19, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[19]));
DEFINE(GPR20, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[20]));
DEFINE(GPR21, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[21]));
DEFINE(GPR22, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[22]));
DEFINE(GPR23, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[23]));
DEFINE(GPR24, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[24]));
DEFINE(GPR25, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[25]));
DEFINE(GPR26, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[26]));
DEFINE(GPR27, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[27]));
DEFINE(GPR28, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[28]));
DEFINE(GPR29, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[29]));
DEFINE(GPR30, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[30]));
DEFINE(GPR31, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[31]));
/*
* Note: these symbols include _ because they overlap with special
* register names
*/
DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
DEFINE(_MQ, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, mq));
DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
/* The PowerPC 400-class & Book-E processors have neither the DAR nor the DSISR
* SPRs. Hence, we overload them to hold the similar DEAR and ESR SPRs
* for such processors. For critical interrupts we use them to
* hold SRR0 and SRR1.
*/
DEFINE(_DEAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
DEFINE(_ESR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
DEFINE(TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
DEFINE(CLONE_VM, CLONE_VM);
DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
DEFINE(MM_PGD, offsetof(struct mm_struct, pgd));
/* About the CPU features table */
DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
#ifdef CONFIG_PPC64
DEFINE(MM, offsetof(struct task_struct, mm));
DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));
DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
/* paca */
DEFINE(PACA_SIZE, sizeof(struct paca_struct));
DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_HUGETLB_PAGE
DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
#endif /* CONFIG_HUGETLB_PAGE */
DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
/* RTAS */
DEFINE(RTASBASE, offsetof(struct rtas_t, base));
DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));
DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));
/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);
/* systemcfg offsets for use by vdso */
DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));
/* timeval/timezone offsets for use by vdso */
DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));
#endif
DEFINE(pbe_address, offsetof(struct pbe, address));
DEFINE(pbe_orig_address, offsetof(struct pbe, orig_address));
DEFINE(pbe_next, offsetof(struct pbe, next));
DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28);
return 0;
}
/*
* FPU support code, moved here from head.S so that it can be used
* by chips which use other head-whatever.S files.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/config.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
/*
* This task wants to use the FPU now.
* On UP, disable FP for the task which had the FPU previously,
* and save its floating-point registers in its thread_struct.
* Load up this task's FP registers from its thread_struct,
* enable the FPU for the current task and return to the task.
*/
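/*
 * A rough C-style sketch of the UP scheme implemented below (illustrative
 * only; the helper names are hypothetical, not kernel functions):
 *
 *	if (last_task_used_math) {
 *		save_fprs_and_fpscr(&last_task_used_math->thread);
 *		clear_msr_fp_in_saved_regs(last_task_used_math); // it traps again later
 *	}
 *	restore_fprs_and_fpscr(&current->thread);
 *	last_task_used_math = current;	// current now owns the FPU
 */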
.globl load_up_fpu
load_up_fpu:
mfmsr r5
ori r5,r5,MSR_FP
#ifdef CONFIG_PPC64BRIDGE
clrldi r5,r5,1 /* turn off 64-bit mode */
#endif /* CONFIG_PPC64BRIDGE */
SYNC
MTMSRD(r5) /* enable use of fpu now */
isync
/*
* For SMP, we don't do lazy FPU switching because it just gets too
* horrendously complex, especially when a task switches from one CPU
* to another. Instead we call giveup_fpu in switch_to.
*/
#ifndef CONFIG_SMP
tophys(r6,0) /* get __pa constant */
addis r3,r6,last_task_used_math@ha
lwz r4,last_task_used_math@l(r3)
cmpwi 0,r4,0
beq 1f
add r4,r4,r6
addi r4,r4,THREAD /* want last_task_used_math->thread */
SAVE_32FPRS(0, r4)
mffs fr0
stfd fr0,THREAD_FPSCR-4(r4)
lwz r5,PT_REGS(r4)
add r5,r5,r6
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r10,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r10 /* disable FP for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of FP after return */
mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
lwz r4,THREAD_FPEXC_MODE(r5)
ori r9,r9,MSR_FP /* enable FP for current */
or r9,r9,r4
lfd fr0,THREAD_FPSCR-4(r5)
mtfsf 0xff,fr0
REST_32FPRS(0, r5)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
sub r4,r4,r6
stw r4,last_task_used_math@l(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
b fast_exception_return
/*
* FP unavailable trap from kernel - print a message, but let
* the task use FP in the kernel until it returns to user mode.
*/
.globl KernelFP
KernelFP:
lwz r3,_MSR(r1)
ori r3,r3,MSR_FP
stw r3,_MSR(r1) /* enable use of FP after return */
lis r3,86f@h
ori r3,r3,86f@l
mr r4,r2 /* current */
lwz r5,_NIP(r1)
bl printk
b ret_from_except
86: .string "floating point used in kernel (task=%p, pc=%x)\n"
.align 4,0
/*
* giveup_fpu(tsk)
* Disable FP for the task given as the argument,
* and save the floating-point registers in its thread_struct.
* Enables the FPU for use in the kernel on return.
*/
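/*
 * A sketch of how a context-switch path might call this (illustrative,
 * not code from this file):
 *
 *	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
 *		giveup_fpu(prev);	// flush FP state before prev is switched out
 */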
.globl giveup_fpu
giveup_fpu:
mfmsr r5
ori r5,r5,MSR_FP
SYNC_601
ISYNC_601
MTMSRD(r5) /* enable use of fpu now */
SYNC_601
isync
cmpwi 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
lwz r5,PT_REGS(r3)
cmpwi 0,r5,0
SAVE_32FPRS(0, r3)
mffs fr0
stfd fr0,THREAD_FPSCR-4(r3)
beq 1f
lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r3,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r3 /* disable FP for previous task */
stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
lis r4,last_task_used_math@ha
stw r5,last_task_used_math@l(r4)
#endif /* CONFIG_SMP */
blr
#
# Makefile for ppc-specific library files.
#
obj-y := strcase.o string.o
obj-$(CONFIG_PPC32) += div64.o copy32.o checksum.o
obj-$(CONFIG_PPC64) += copypage.o copyuser.o memcpy.o usercopy.o \
sstep.o checksum64.o
obj-$(CONFIG_PPC_ISERIES) += e2a.o
/*
* Divide a 64-bit unsigned number by a 32-bit unsigned number.
* This routine assumes that the top 32 bits of the dividend are
* non-zero to start with.
* On entry, r3 points to the dividend, which gets overwritten with
* the 64-bit quotient, and r4 contains the divisor.
* On exit, r3 contains the remainder.
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
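/*
 * For clarity, a C sketch of the contract the code below implements
 * (an illustration, not code from this file; callers normally reach it
 * through the do_div() machinery):
 *
 *	unsigned int div64_32_sketch(unsigned long long *np, unsigned int base)
 *	{
 *		unsigned long long n = *np;
 *		*np = n / base;		// 64-bit quotient written back
 *		return n % base;	// 32-bit remainder returned
 *	}
 */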
#include <asm/ppc_asm.h>
#include <asm/processor.h>
_GLOBAL(__div64_32)
lwz r5,0(r3) # get the dividend into r5/r6
lwz r6,4(r3)
cmplw r5,r4
li r7,0
li r8,0
blt 1f
divwu r7,r5,r4 # if dividend.hi >= divisor,
mullw r0,r7,r4 # quotient.hi = dividend.hi / divisor
subf. r5,r0,r5 # dividend.hi %= divisor
beq 3f
1: mr r11,r5 # here dividend.hi != 0
andis. r0,r5,0xc000
bne 2f
cntlzw r0,r5 # we are shifting the dividend right
li r10,-1 # to make it < 2^32, and shifting
srw r10,r10,r0 # the divisor right the same amount,
add r9,r4,r10 # rounding up (so the estimate cannot
andc r11,r6,r10 # ever be too large, only too small)
andc r9,r9,r10
or r11,r5,r11
rotlw r9,r9,r0
rotlw r11,r11,r0
divwu r11,r11,r9 # then we divide the shifted quantities
2: mullw r10,r11,r4 # to get an estimate of the quotient,
mulhwu r9,r11,r4 # multiply the estimate by the divisor,
subfc r6,r10,r6 # take the product from the divisor,
add r8,r8,r11 # and add the estimate to the accumulated
subfe. r5,r9,r5 # quotient
bne 1b
3: cmplw r6,r4
blt 4f
divwu r0,r6,r4 # perform the remaining 32-bit division
mullw r10,r0,r4 # and get the remainder
add r8,r8,r0
subf r6,r10,r6
4: stw r7,0(r3) # return the quotient in *r3
stw r8,4(r3)
mr r3,r6 # return the remainder in r3
blr
#include <linux/ctype.h>
int strcasecmp(const char *s1, const char *s2)
{
int c1, c2;
do {
c1 = tolower(*s1++);
c2 = tolower(*s2++);
} while (c1 == c2 && c1 != 0);
return c1 - c2;
}
int strncasecmp(const char *s1, const char *s2, int n)
{
int c1, c2;
do {
c1 = tolower(*s1++);
c2 = tolower(*s2++);
} while ((--n > 0) && c1 == c2 && c1 != 0);
return c1 - c2;
}
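/*
 * Usage note (illustrative): strcasecmp("PowerPC", "powerpc") returns 0,
 * and strncasecmp("Open", "OPAL", 2) returns 0 because only the first two
 * characters are compared.
 */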
obj-$(CONFIG_MPIC) += mpic.o