提交 7be141d0 编写于 作者: L Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "A couple of EFI fixes, plus misc fixes all around the map"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  efi/arm64: Store Runtime Services revision
  firmware: Do not use WARN_ON(!spin_is_locked())
  x86_32, entry: Clean up sysenter_badsys declaration
  x86/doc: Fix the 'tlb_single_page_flush_ceiling' sysconfig path
  x86/mm: Fix sparse 'tlb_single_page_flush_ceiling' warning and make the variable read-mostly
  x86/mm: Fix RCU splat from new TLB tracepoints
@@ -35,7 +35,7 @@
 invlpg instruction (or instructions _near_ it) show up high in
 profiles. If you believe that individual invalidations being
 called too often, you can lower the tunable:
-	/sys/debug/kernel/x86/tlb_single_page_flush_ceiling
+	/sys/kernel/debug/x86/tlb_single_page_flush_ceiling
 This will cause us to do the global flush for more cases.
 Lowering it to 0 will disable the use of the individual flushes.
...
@@ -465,6 +465,8 @@ static int __init arm64_enter_virtual_mode(void)
 	efi_native_runtime_setup();
 	set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
 
+	efi.runtime_version = efi.systab->hdr.revision;
+
 	return 0;
 
 err_unmap:
...
@@ -683,7 +683,7 @@ END(syscall_badsys)
 sysenter_badsys:
 	movl $-ENOSYS,%eax
 	jmp sysenter_after_call
-END(syscall_badsys)
+END(sysenter_badsys)
 	CFI_ENDPROC
 
 .macro FIXUP_ESPFIX_STACK
...
@@ -49,7 +49,13 @@ void leave_mm(int cpu)
 	if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
 		cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
 		load_cr3(swapper_pg_dir);
-		trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
+		/*
+		 * This gets called in the idle path where RCU
+		 * functions differently.  Tracing normally
+		 * uses RCU, so we have to call the tracepoint
+		 * specially here.
+		 */
+		trace_tlb_flush_rcuidle(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
 	}
 }
 EXPORT_SYMBOL_GPL(leave_mm);
@@ -174,7 +180,7 @@ void flush_tlb_current_task(void)
  *
  * This is in units of pages.
  */
-unsigned long tlb_single_page_flush_ceiling = 33;
+static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
 
 void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
 				unsigned long end, unsigned long vmflag)
...
@@ -481,7 +481,7 @@ EXPORT_SYMBOL_GPL(efivar_entry_remove);
  */
 static void efivar_entry_list_del_unlock(struct efivar_entry *entry)
 {
-	WARN_ON(!spin_is_locked(&__efivars->lock));
+	lockdep_assert_held(&__efivars->lock);
 
 	list_del(&entry->list);
 	spin_unlock_irq(&__efivars->lock);
@@ -507,7 +507,7 @@ int __efivar_entry_delete(struct efivar_entry *entry)
 	const struct efivar_operations *ops = __efivars->ops;
 	efi_status_t status;
 
-	WARN_ON(!spin_is_locked(&__efivars->lock));
+	lockdep_assert_held(&__efivars->lock);
 
 	status = ops->set_variable(entry->var.VariableName,
 				   &entry->var.VendorGuid,
@@ -667,7 +667,7 @@ struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
 	int strsize1, strsize2;
 	bool found = false;
 
-	WARN_ON(!spin_is_locked(&__efivars->lock));
+	lockdep_assert_held(&__efivars->lock);
 
 	list_for_each_entry_safe(entry, n, head, list) {
 		strsize1 = ucs2_strsize(name, 1024);
@@ -739,7 +739,7 @@ int __efivar_entry_get(struct efivar_entry *entry, u32 *attributes,
 	const struct efivar_operations *ops = __efivars->ops;
 	efi_status_t status;
 
-	WARN_ON(!spin_is_locked(&__efivars->lock));
+	lockdep_assert_held(&__efivars->lock);
 
 	status = ops->get_variable(entry->var.VariableName,
 				   &entry->var.VendorGuid,
...
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册