Commit e52c8857 authored by Linus Torvalds


Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: update defconfigs
  x86: msr: fix bogus return values from rdmsr_safe/wrmsr_safe
  x86: cpuid: correct return value on partial operations
  x86: msr: correct return value on partial operations
  x86: cpuid: propagate error from smp_call_function_single()
  x86: msr: propagate errors from smp_call_function_single()
  smp: have smp_call_function_single() detect invalid CPUs
(two file diffs collapsed and not shown)
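Taken together, the series turns rdmsr_on_cpu()/wrmsr_on_cpu() and their _safe variants into int-returning helpers that propagate both an MSR fault and a failure from smp_call_function_single() (now -ENXIO for an out-of-range or offline CPU). A minimal caller sketch under these new semantics; read_one_msr() and its parameters are illustrative, not part of the merge:

#include <asm/msr.h>
#include <linux/errno.h>

/* Sketch only: consuming the int-returning rdmsr_safe_on_cpu()
 * after this series; the helper name is hypothetical. */
static int read_one_msr(unsigned int cpu, u32 msr_no, u64 *val)
{
        u32 lo, hi;
        int err = rdmsr_safe_on_cpu(cpu, msr_no, &lo, &hi);

        if (err == -ENXIO)
                return err;     /* target CPU offline, from smp_call_function_single() */
        if (err)
                return err;     /* the RDMSR itself faulted on that CPU */

        *val = ((u64)hi << 32) | lo;
        return 0;
}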
@@ -89,6 +89,8 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
         struct cpuid_regs cmd;
         int cpu = iminor(file->f_path.dentry->d_inode);
         u64 pos = *ppos;
+        ssize_t bytes = 0;
+        int err = 0;
 
         if (count % 16)
                 return -EINVAL; /* Invalid chunk size */
@@ -96,14 +98,19 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
         for (; count; count -= 16) {
                 cmd.eax = pos;
                 cmd.ecx = pos >> 32;
-                smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
-                if (copy_to_user(tmp, &cmd, 16))
-                        return -EFAULT;
+                err = smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1);
+                if (err)
+                        break;
+                if (copy_to_user(tmp, &cmd, 16)) {
+                        err = -EFAULT;
+                        break;
+                }
                 tmp += 16;
+                bytes += 16;
                 *ppos = ++pos;
         }
 
-        return tmp - buf;
+        return bytes ? bytes : err;
 }
 
 static int cpuid_open(struct inode *inode, struct file *file)
@@ -72,21 +72,28 @@ static ssize_t msr_read(struct file *file, char __user *buf,
         u32 data[2];
         u32 reg = *ppos;
         int cpu = iminor(file->f_path.dentry->d_inode);
-        int err;
+        int err = 0;
+        ssize_t bytes = 0;
 
         if (count % 8)
                 return -EINVAL; /* Invalid chunk size */
 
         for (; count; count -= 8) {
                 err = rdmsr_safe_on_cpu(cpu, reg, &data[0], &data[1]);
-                if (err)
-                        return -EIO;
-                if (copy_to_user(tmp, &data, 8))
-                        return -EFAULT;
+                if (err) {
+                        if (err == -EFAULT) /* Fix idiotic error code */
+                                err = -EIO;
+                        break;
+                }
+                if (copy_to_user(tmp, &data, 8)) {
+                        err = -EFAULT;
+                        break;
+                }
                 tmp += 2;
+                bytes += 8;
         }
 
-        return ((char __user *)tmp) - buf;
+        return bytes ? bytes : err;
 }
 
 static ssize_t msr_write(struct file *file, const char __user *buf,
@@ -96,21 +103,28 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
         u32 data[2];
         u32 reg = *ppos;
         int cpu = iminor(file->f_path.dentry->d_inode);
-        int err;
+        int err = 0;
+        ssize_t bytes = 0;
 
         if (count % 8)
                 return -EINVAL; /* Invalid chunk size */
 
         for (; count; count -= 8) {
-                if (copy_from_user(&data, tmp, 8))
-                        return -EFAULT;
+                if (copy_from_user(&data, tmp, 8)) {
+                        err = -EFAULT;
+                        break;
+                }
                 err = wrmsr_safe_on_cpu(cpu, reg, data[0], data[1]);
-                if (err)
-                        return -EIO;
+                if (err) {
+                        if (err == -EFAULT) /* Fix idiotic error code */
+                                err = -EIO;
+                        break;
+                }
                 tmp += 2;
+                bytes += 8;
         }
 
-        return ((char __user *)tmp) - buf;
+        return bytes ? bytes : err;
 }
 
 static int msr_open(struct inode *inode, struct file *file)
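With the msr_read()/msr_write() fixes above, /dev/cpu/N/msr follows ordinary read(2)/write(2) semantics: a partial transfer reports the bytes already copied, and an error is returned only when nothing was transferred. A hedged userspace sketch (the device path and MSR number are examples only; MSR 0x10 is the TSC):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        uint64_t val;
        int fd = open("/dev/cpu/0/msr", O_RDONLY);

        if (fd < 0)
                return 1;
        /* The MSR number is passed as the file offset; each MSR is 8 bytes. */
        if (pread(fd, &val, sizeof(val), 0x10) != sizeof(val)) {
                perror("pread");        /* e.g. EIO if the RDMSR faulted */
                close(fd);
                return 1;
        }
        printf("MSR 0x10 = %#llx\n", (unsigned long long)val);
        close(fd);
        return 0;
}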
@@ -30,10 +30,11 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe)
         rv.msr_no = msr_no;
         if (safe) {
-                smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1);
-                err = rv.err;
+                err = smp_call_function_single(cpu, __rdmsr_safe_on_cpu,
+                                               &rv, 1);
+                err = err ? err : rv.err;
         } else {
-                smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
+                err = smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1);
         }
         *l = rv.l;
         *h = rv.h;
@@ -64,23 +65,24 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe)
         rv.l = l;
         rv.h = h;
         if (safe) {
-                smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1);
-                err = rv.err;
+                err = smp_call_function_single(cpu, __wrmsr_safe_on_cpu,
+                                               &rv, 1);
+                err = err ? err : rv.err;
         } else {
-                smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
+                err = smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1);
         }
 
         return err;
 }
 
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
-        _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
+        return _wrmsr_on_cpu(cpu, msr_no, l, h, 0);
 }
 
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
-        _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
+        return _rdmsr_on_cpu(cpu, msr_no, l, h, 0);
 }
 
 /* These "safe" variants are slower and should be used when the target MSR
@@ -52,14 +52,14 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 {
         DECLARE_ARGS(val, low, high);
 
-        asm volatile("2: rdmsr ; xor %0,%0\n"
+        asm volatile("2: rdmsr ; xor %[err],%[err]\n"
                      "1:\n\t"
                      ".section .fixup,\"ax\"\n\t"
-                     "3: mov %3,%0 ; jmp 1b\n\t"
+                     "3: mov %[fault],%[err] ; jmp 1b\n\t"
                      ".previous\n\t"
                      _ASM_EXTABLE(2b, 3b)
-                     : "=r" (*err), EAX_EDX_RET(val, low, high)
-                     : "c" (msr), "i" (-EFAULT));
+                     : [err] "=r" (*err), EAX_EDX_RET(val, low, high)
+                     : "c" (msr), [fault] "i" (-EFAULT));
         return EAX_EDX_VAL(val, low, high);
 }
 
@@ -73,15 +73,15 @@ static inline int native_write_msr_safe(unsigned int msr,
                                         unsigned low, unsigned high)
 {
         int err;
-        asm volatile("2: wrmsr ; xor %0,%0\n"
+        asm volatile("2: wrmsr ; xor %[err],%[err]\n"
                      "1:\n\t"
                      ".section .fixup,\"ax\"\n\t"
-                     "3: mov %4,%0 ; jmp 1b\n\t"
+                     "3: mov %[fault],%[err] ; jmp 1b\n\t"
                      ".previous\n\t"
                      _ASM_EXTABLE(2b, 3b)
-                     : "=a" (err)
+                     : [err] "=a" (err)
                      : "c" (msr), "0" (low), "d" (high),
-                       "i" (-EFAULT)
+                       [fault] "i" (-EFAULT)
                      : "memory");
         return err;
 }
 
@@ -192,19 +192,20 @@ do { \
 #define write_rdtscp_aux(val) wrmsr(0xc0000103, (val), 0)
 
 #ifdef CONFIG_SMP
-void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
-void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
+int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
+int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h);
 int wrmsr_safe_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h);
 #else  /*  CONFIG_SMP  */
-static inline void rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
+static inline int rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h)
 {
         rdmsr(msr_no, *l, *h);
+        return 0;
 }
-static inline void wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
+static inline int wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h)
 {
         wrmsr(msr_no, l, h);
+        return 0;
 }
 static inline int rdmsr_safe_on_cpu(unsigned int cpu, u32 msr_no,
                                     u32 *l, u32 *h)
@@ -210,8 +210,10 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 {
         struct call_single_data d;
         unsigned long flags;
-        /* prevent preemption and reschedule on another processor */
+        /* prevent preemption and reschedule on another processor,
+           as well as CPU removal */
         int me = get_cpu();
+        int err = 0;
 
         /* Can deadlock when called with interrupts disabled */
         WARN_ON(irqs_disabled());
@@ -220,7 +222,7 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                 local_irq_save(flags);
                 func(info);
                 local_irq_restore(flags);
-        } else {
+        } else if ((unsigned)cpu < NR_CPUS && cpu_online(cpu)) {
                 struct call_single_data *data = NULL;
 
                 if (!wait) {
@@ -236,10 +238,12 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
                 data->func = func;
                 data->info = info;
                 generic_exec_single(cpu, data);
+        } else {
+                err = -ENXIO;   /* CPU not online */
         }
 
         put_cpu();
-        return 0;
+        return err;
 }
 EXPORT_SYMBOL(smp_call_function_single);
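After the kernel/smp.c change, smp_call_function_single() refuses to queue work for an out-of-range or offline CPU and returns -ENXIO instead of silently succeeding, so callers that target hot-pluggable CPUs should check the result. A minimal sketch; remote_probe() and probe_cpu() are illustrative names, not part of the merge:

#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>

/* Sketch only: runs on the target CPU in IPI context. */
static void remote_probe(void *info)
{
        *(int *)info = smp_processor_id();
}

static int probe_cpu(int cpu)
{
        int where = -1;
        /* wait=1: block until remote_probe() has run on 'cpu' */
        int err = smp_call_function_single(cpu, remote_probe, &where, 1);

        if (err == -ENXIO)
                printk(KERN_WARNING "cpu %d not online, call skipped\n", cpu);
        return err;
}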