Commit 568180a5 authored by: Linus Torvalds

Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus

Pull MIPS fixes from Ralf Baechle:
 "A fair number of fixes across the field.  Nothing terribly
  complicated; the one liners in below changelog should be fairly
  descriptive.

  Noteworthy are the SB1 change, which is the result of changes to
  binutils that produced one big gas warning for most files being
  assembled, as well as the asid_cache and branch emulation fixes,
  which fix corruption or possible unintended behaviour of kernel or
  application code.  The remaining fixes are more platform- or
  subsystem-specific"

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus:
  MIPS: R46000: Fix Micro-assembler field overflow for R4600 V2
  MIPS: ptrace: Avoid smp_processor_id() in preemptible code
  MIPS: Lemote 2F: cs5536: mfgpt: use raw locks
  MIPS: SB1: Fix excessive kernel warnings.
  MIPS: RC32434: fix broken PCI resource initialization
  MIPS: malta: memory.c: Initialize the 'memsize' variable
  MIPS: Fix typo when reporting cache and ftlb errors for ImgTec cores
  MIPS: Fix inconsistancy of __NR_Linux_syscalls value
  MIPS: Fix branch emulation of branch likely instructions.
  MIPS: Fix a typo error in AUDIT_ARCH definition
  MIPS: Change type of asid_cache to unsigned long
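The branch emulation fix listed above is easiest to follow with the instruction encoding in mind: MIPS I-type instructions keep the major opcode in bits 31..26 and the rt register number in bits 20..16, so comparing i_format.rt against beql_op/bnel_op (as the old code did) can never identify a branch-likely instruction. Below is a minimal userspace sketch of that decoding; the helper names, the BEQL_OP value, and the sample encoding are illustrative assumptions, not the kernel's API (the kernel decodes through union mips_instruction and the *_op enums in asm/inst.h).

```c
/* Illustrative sketch only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define BEQL_OP 0x14u   /* assumed major opcode of BEQL */

static unsigned opcode_of(uint32_t insn) { return (insn >> 26) & 0x3f; }
static unsigned rt_of(uint32_t insn)     { return (insn >> 16) & 0x1f; }

int main(void)
{
	uint32_t word = 0x50400003;   /* example encoding of "beql v0, zero, +3" */

	/* Only the opcode field identifies the branch-likely variant;
	 * rt is just a register number (here $zero). */
	printf("opcode=0x%02x rt=%u -> %s\n",
	       opcode_of(word), rt_of(word),
	       opcode_of(word) == BEQL_OP ? "branch likely" : "not branch likely");
	return 0;
}
```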
@@ -151,7 +151,7 @@ cflags-$(CONFIG_CPU_NEVADA) += $(call cc-option,-march=rm5200,-march=r5000) \
 	-Wa,--trap
 cflags-$(CONFIG_CPU_RM7000) += $(call cc-option,-march=rm7000,-march=r5000) \
 	-Wa,--trap
-cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1,-march=r5000) \
+cflags-$(CONFIG_CPU_SB1) += $(call cc-option,-march=sb1 -mno-mdmx -mno-mips3d,-march=r5000) \
 	-Wa,--trap
 cflags-$(CONFIG_CPU_R8000) += -march=r8000 -Wa,--trap
 cflags-$(CONFIG_CPU_R10000) += $(call cc-option,-march=r10000,-march=r8000) \
......
@@ -39,14 +39,14 @@ struct cache_desc {
 #define MIPS_CACHE_PINDEX 0x00000020 /* Physically indexed cache */
 struct cpuinfo_mips {
-	unsigned int	udelay_val;
-	unsigned int	asid_cache;
+	unsigned long	asid_cache;
 	/*
	 * Capability and feature descriptor structure for MIPS CPU
	 */
 	unsigned long	options;
 	unsigned long	ases;
+	unsigned int	udelay_val;
 	unsigned int	processor_id;
 	unsigned int	fpu_id;
 	unsigned int	msa_id;
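The asid_cache hunk above widens the field because the per-mm MMU context it is assigned from and compared against is an unsigned long; on a 64-bit kernel a 32-bit cache silently drops the upper generation bits, which is what allowed stale ASIDs to be treated as live. A tiny self-contained illustration of that truncation (the bit layout and constants are simplified assumptions, not the kernel's exact scheme):

```c
#include <stdio.h>

int main(void)
{
	/* Assumed layout: hardware ASID in the low 8 bits, generation counter above. */
	unsigned long mm_context = (1UL << 32) | 0x2a;

	unsigned int  cache_int  = (unsigned int)mm_context;  /* old type: generation lost */
	unsigned long cache_long = mm_context;                 /* widened type: preserved  */

	printf("unsigned int cache:  %#x\n", cache_int);    /* prints 0x2a        */
	printf("unsigned long cache: %#lx\n", cache_long);  /* prints 0x10000002a */
	return 0;
}
```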
......
@@ -381,7 +381,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 #define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 350
+#define __NR_O32_Linux_syscalls 351
 #if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -710,7 +710,7 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 #define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 310
+#define __NR_64_Linux_syscalls 311
 #if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1043,6 +1043,6 @@
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 #define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 314
+#define __NR_N32_Linux_syscalls 315
 #endif /* _UAPI_ASM_UNISTD_H */
@@ -317,7 +317,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		if (regs->regs[insn.i_format.rs] ==
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == beql_op)
+			if (insn.i_format.opcode == beql_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -329,7 +329,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		if (regs->regs[insn.i_format.rs] !=
 		    regs->regs[insn.i_format.rt]) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bnel_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -341,7 +341,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] <= 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == blezl_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
@@ -353,7 +353,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 		/* rt field assumed to be zero */
 		if ((long)regs->regs[insn.i_format.rs] > 0) {
 			epc = epc + 4 + (insn.i_format.simmediate << 2);
-			if (insn.i_format.rt == bnel_op)
+			if (insn.i_format.opcode == bgtzl_op)
 				ret = BRANCH_LIKELY_TAKEN;
 		} else
 			epc += 8;
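For reference on the epc arithmetic in these hunks: the branch target is the address of the delay slot (epc + 4) plus the sign-extended 16-bit offset shifted left by 2, and when a branch-likely is not taken the delay-slot instruction is annulled, so the emulator skips over both words (epc += 8). A small worked example; the sample address and offset are made up:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t epc  = 0x80001000;   /* address of the branch instruction */
	int16_t  simm = 3;            /* sign-extended 16-bit branch offset */

	uint32_t taken     = epc + 4 + ((int32_t)simm << 2);  /* 0x80001010 */
	uint32_t not_taken = epc + 8;                          /* delay slot annulled */

	printf("taken:     %#x\n", taken);
	printf("not taken: %#x\n", not_taken);
	return 0;
}
```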
......
@@ -163,7 +163,7 @@ int ptrace_get_watch_regs(struct task_struct *child,
 	enum pt_watch_style style;
 	int i;
-	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
+	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 		return -EIO;
 	if (!access_ok(VERIFY_WRITE, addr, sizeof(struct pt_watch_regs)))
 		return -EIO;
@@ -177,14 +177,14 @@ int ptrace_get_watch_regs(struct task_struct *child,
 #endif
 	__put_user(style, &addr->style);
-	__put_user(current_cpu_data.watch_reg_use_cnt,
+	__put_user(boot_cpu_data.watch_reg_use_cnt,
 		   &addr->WATCH_STYLE.num_valid);
-	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 		__put_user(child->thread.watch.mips3264.watchlo[i],
 			   &addr->WATCH_STYLE.watchlo[i]);
 		__put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
 			   &addr->WATCH_STYLE.watchhi[i]);
-		__put_user(current_cpu_data.watch_reg_masks[i],
+		__put_user(boot_cpu_data.watch_reg_masks[i],
 			   &addr->WATCH_STYLE.watch_masks[i]);
 	}
 	for (; i < 8; i++) {
@@ -204,12 +204,12 @@ int ptrace_set_watch_regs(struct task_struct *child,
 	unsigned long lt[NUM_WATCH_REGS];
 	u16 ht[NUM_WATCH_REGS];
-	if (!cpu_has_watch || current_cpu_data.watch_reg_use_cnt == 0)
+	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
 		return -EIO;
 	if (!access_ok(VERIFY_READ, addr, sizeof(struct pt_watch_regs)))
 		return -EIO;
 	/* Check the values. */
-	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
 #ifdef CONFIG_32BIT
 		if (lt[i] & __UA_LIMIT)
@@ -228,7 +228,7 @@ int ptrace_set_watch_regs(struct task_struct *child,
 			return -EINVAL;
 	}
 	/* Install them. */
-	for (i = 0; i < current_cpu_data.watch_reg_use_cnt; i++) {
+	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
 		if (lt[i] & 7)
 			watch_active = 1;
 		child->thread.watch.mips3264.watchlo[i] = lt[i];
......
@@ -1545,7 +1545,7 @@ asmlinkage void cache_parity_error(void)
 	       reg_val & (1<<30) ? "secondary" : "primary",
 	       reg_val & (1<<31) ? "data" : "insn");
 	if (cpu_has_mips_r2 &&
-	    ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) {
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
 		pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
 			reg_val & (1<<29) ? "ED " : "",
 			reg_val & (1<<28) ? "ET " : "",
@@ -1585,7 +1585,7 @@ asmlinkage void do_ftlb(void)
 	/* For the moment, report the problem and hang. */
 	if (cpu_has_mips_r2 &&
-	    ((current_cpu_data.processor_id && 0xff0000) == PRID_COMP_MIPS)) {
+	    ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
 		pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
 			read_c0_ecc());
 		pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
......
@@ -27,8 +27,7 @@
 #include <cs5536/cs5536_mfgpt.h>
-DEFINE_SPINLOCK(mfgpt_lock);
-EXPORT_SYMBOL(mfgpt_lock);
+static DEFINE_RAW_SPINLOCK(mfgpt_lock);
 static u32 mfgpt_base;
@@ -55,7 +54,7 @@ EXPORT_SYMBOL(enable_mfgpt0_counter);
 static void init_mfgpt_timer(enum clock_event_mode mode,
 			     struct clock_event_device *evt)
 {
-	spin_lock(&mfgpt_lock);
+	raw_spin_lock(&mfgpt_lock);
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
@@ -79,7 +78,7 @@ static void init_mfgpt_timer(enum clock_event_mode mode,
 		/* Nothing to do here */
 		break;
 	}
-	spin_unlock(&mfgpt_lock);
+	raw_spin_unlock(&mfgpt_lock);
 }
 static struct clock_event_device mfgpt_clockevent = {
@@ -157,7 +156,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
 	static int old_count;
 	static u32 old_jifs;
-	spin_lock_irqsave(&mfgpt_lock, flags);
+	raw_spin_lock_irqsave(&mfgpt_lock, flags);
 	/*
	 * Although our caller may have the read side of xtime_lock,
	 * this is now a seqlock, and we are cheating in this routine
@@ -191,7 +190,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
 	old_count = count;
 	old_jifs = jifs;
-	spin_unlock_irqrestore(&mfgpt_lock, flags);
+	raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
 	return (cycle_t) (jifs * COMPARE) + count;
 }
......
@@ -273,7 +273,7 @@ void build_clear_page(void)
 		uasm_i_ori(&buf, A2, A0, off);
 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-		uasm_i_lui(&buf, AT, 0xa000);
+		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
 				* cache_line_size : 0;
@@ -424,7 +424,7 @@ void build_copy_page(void)
 		uasm_i_ori(&buf, A2, A0, off);
 	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
-		uasm_i_lui(&buf, AT, 0xa000);
+		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));
 	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
 				cache_line_size : 0;
......
@@ -27,7 +27,7 @@ unsigned long physical_memsize = 0L;
 fw_memblock_t * __init fw_getmdesc(int eva)
 {
 	char *memsize_str, *ememsize_str __maybe_unused = NULL, *ptr;
-	unsigned long memsize, ememsize __maybe_unused = 0;
+	unsigned long memsize = 0, ememsize __maybe_unused = 0;
 	static char cmdline[COMMAND_LINE_SIZE] __initdata;
 	int tmp;
......
@@ -53,7 +53,6 @@ static struct resource rc32434_res_pci_mem1 = {
 	.start = 0x50000000,
 	.end = 0x5FFFFFFF,
 	.flags = IORESOURCE_MEM,
-	.parent = &rc32434_res_pci_mem1,
 	.sibling = NULL,
 	.child = &rc32434_res_pci_mem2
 };
......
@@ -357,7 +357,7 @@ enum {
 #define AUDIT_ARCH_MIPS64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|\
 			      __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_MIPSEL64 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
-#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE\
+#define AUDIT_ARCH_MIPSEL64N32 (EM_MIPS|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE|\
 			      __AUDIT_ARCH_CONVENTION_MIPS64_N32)
 #define AUDIT_ARCH_OPENRISC (EM_OPENRISC)
 #define AUDIT_ARCH_PARISC (EM_PARISC)
......