Commit f89db789 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Two documentation updates, plus a debugging annotation fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/crash: Update the stale comment in reserve_crashkernel()
  x86/irq, trace: Add __irq_entry annotation to x86's platform IRQ handlers
  Documentation, x86, resctrl: Recommend locking for resctrlfs
......
@@ -212,3 +212,117 @@ Finally we move core 4-7 over to the new group and make sure that the
kernel and the tasks running there get 50% of the cache.
# echo C0 > p0/cpus

4) Locking between applications

Certain operations on the resctrl filesystem, composed of read/writes
to/from multiple files, must be atomic.

As an example, the allocation of an exclusive reservation of L3 cache
involves:

  1. Read the cbmmasks from each directory
  2. Find a contiguous set of bits in the global CBM bitmask that is clear
     in any of the directory cbmmasks
  3. Create a new directory
  4. Write the bits found in step 2 to the new directory's "schemata" file

If two applications attempt to allocate space concurrently then they can
end up allocating the same bits, so the reservations are shared instead of
exclusive.

To coordinate atomic operations on the resctrlfs and to avoid the problem
above, the following locking procedure is recommended:

Locking is based on flock, which is available both in libc and as a shell
command.

Write lock:

 A) Take flock(LOCK_EX) on /sys/fs/resctrl
 B) Read/write the directory structure.
 C) Release the lock with flock(LOCK_UN)

Read lock:

 A) Take flock(LOCK_SH) on /sys/fs/resctrl
 B) If successful, read the directory structure.
 C) Release the lock with flock(LOCK_UN)

Example with bash:

# Atomically read directory structure
$ flock -s /sys/fs/resctrl/ find /sys/fs/resctrl

# Read directory contents and create new subdirectory
$ cat create-dir.sh
find /sys/fs/resctrl/ > output.txt
mask=$(function-of output.txt)   # placeholder: compute a free contiguous bitmask
mkdir /sys/fs/resctrl/newres/
echo "$mask" > /sys/fs/resctrl/newres/schemata

$ flock /sys/fs/resctrl/ ./create-dir.sh

Example with C:

/*
 * Example code to take advisory locks
 * before accessing resctrl filesystem
 */
#include <sys/file.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

void resctrl_take_shared_lock(int fd)
{
	int ret;

	/* take shared lock on resctrl filesystem */
	ret = flock(fd, LOCK_SH);
	if (ret) {
		perror("flock");
		exit(-1);
	}
}

void resctrl_take_exclusive_lock(int fd)
{
	int ret;

	/* take exclusive lock on resctrl filesystem */
	ret = flock(fd, LOCK_EX);
	if (ret) {
		perror("flock");
		exit(-1);
	}
}

void resctrl_release_lock(int fd)
{
	int ret;

	/* release lock on resctrl filesystem */
	ret = flock(fd, LOCK_UN);
	if (ret) {
		perror("flock");
		exit(-1);
	}
}

int main(void)
{
	int fd;

	fd = open("/sys/fs/resctrl", O_DIRECTORY);
	if (fd == -1) {
		perror("open");
		exit(-1);
	}

	resctrl_take_shared_lock(fd);
	/* code to read directory contents */
	resctrl_release_lock(fd);

	resctrl_take_exclusive_lock(fd);
	/* code to read and write directory contents */
	resctrl_release_lock(fd);

	return 0;
}
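
A minimal sketch of how the exclusive lock can bracket steps 3 and 4 of
the allocation procedure above, assuming the same open() of /sys/fs/resctrl
as in the previous example; the group name "newres" and the "L3:0=0ff"
schemata string are illustrative values only:

/*
 * Sketch: create a new resource group while holding the exclusive lock,
 * so the mkdir and the "schemata" write are atomic with respect to
 * other lock-aware applications.  A real application writes the bits
 * it found free in step 2 instead of the example mask below.
 */
#include <sys/file.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>

int create_group_locked(int resctrl_fd)
{
	int fd, ret = -1;

	if (flock(resctrl_fd, LOCK_EX))			/* write lock, step A */
		return -1;

	/* steps 1-2: read cbmmasks, pick free contiguous bits (omitted) */

	if (mkdir("/sys/fs/resctrl/newres", 0755))	/* step 3 */
		goto out;

	fd = open("/sys/fs/resctrl/newres/schemata", O_WRONLY);
	if (fd < 0)
		goto out;

	/* step 4: write the chosen bits to the new group's schemata file */
	if (write(fd, "L3:0=0ff\n", 9) == 9)
		ret = 0;
	close(fd);
out:
	flock(resctrl_fd, LOCK_UN);			/* release, step C */
	return ret;
}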
......
@@ -1865,14 +1865,14 @@ static void __smp_spurious_interrupt(u8 vector)
"should never happen.\n", vector, smp_processor_id());
}
-__visible void smp_spurious_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
{
entering_irq();
__smp_spurious_interrupt(~regs->orig_ax);
exiting_irq();
}
-__visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_trace_spurious_interrupt(struct pt_regs *regs)
{
u8 vector = ~regs->orig_ax;
......
@@ -1923,14 +1923,14 @@ static void __smp_error_interrupt(struct pt_regs *regs)
}
-__visible void smp_error_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
{
entering_irq();
__smp_error_interrupt(regs);
exiting_irq();
}
-__visible void smp_trace_error_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_trace_error_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_error_apic_entry(ERROR_APIC_VECTOR);
......
......
@@ -559,7 +559,7 @@ void send_cleanup_vector(struct irq_cfg *cfg)
__send_cleanup_vector(data);
}
-asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
+asmlinkage __visible void __irq_entry smp_irq_move_cleanup_interrupt(void)
{
unsigned vector, me;
......
......
@@ -816,14 +816,14 @@ static inline void __smp_deferred_error_interrupt(void)
deferred_error_int_vector();
}
-asmlinkage __visible void smp_deferred_error_interrupt(void)
+asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
{
entering_irq();
__smp_deferred_error_interrupt();
exiting_ack_irq();
}
-asmlinkage __visible void smp_trace_deferred_error_interrupt(void)
+asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
{
entering_irq();
trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
......
......
@@ -396,14 +396,16 @@ static inline void __smp_thermal_interrupt(void)
smp_thermal_vector();
}
-asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void __irq_entry
+smp_thermal_interrupt(struct pt_regs *regs)
{
entering_irq();
__smp_thermal_interrupt();
exiting_ack_irq();
}
-asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
+asmlinkage __visible void __irq_entry
+smp_trace_thermal_interrupt(struct pt_regs *regs)
{
entering_irq();
trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
......
......
@@ -23,14 +23,14 @@ static inline void __smp_threshold_interrupt(void)
mce_threshold_vector();
}
-asmlinkage __visible void smp_threshold_interrupt(void)
+asmlinkage __visible void __irq_entry smp_threshold_interrupt(void)
{
entering_irq();
__smp_threshold_interrupt();
exiting_ack_irq();
}
-asmlinkage __visible void smp_trace_threshold_interrupt(void)
+asmlinkage __visible void __irq_entry smp_trace_threshold_interrupt(void)
{
entering_irq();
trace_threshold_apic_entry(THRESHOLD_APIC_VECTOR);
......
......
@@ -264,7 +264,7 @@ void __smp_x86_platform_ipi(void)
x86_platform_ipi_callback();
}
-__visible void smp_x86_platform_ipi(struct pt_regs *regs)
+__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
......
@@ -315,7 +315,7 @@ __visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
}
#endif
-__visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
+__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
......
......
@@ -9,6 +9,7 @@
#include <linux/hardirq.h>
#include <asm/apic.h>
#include <asm/trace/irq_vectors.h>
+#include <linux/interrupt.h>
static inline void __smp_irq_work_interrupt(void)
{
......
@@ -16,14 +17,14 @@ static inline void __smp_irq_work_interrupt(void)
irq_work_run();
}
-__visible void smp_irq_work_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_irq_work_interrupt(struct pt_regs *regs)
{
ipi_entering_ack_irq();
__smp_irq_work_interrupt();
exiting_irq();
}
-__visible void smp_trace_irq_work_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_trace_irq_work_interrupt(struct pt_regs *regs)
{
ipi_entering_ack_irq();
trace_irq_work_entry(IRQ_WORK_VECTOR);
......
......
@@ -575,7 +575,9 @@ static void __init reserve_crashkernel(void)
/* 0 means: find the address automatically */
if (crash_base <= 0) {
/*
-* kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
+* Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
+* as old kexec-tools loads bzImage below that, unless
+* "crashkernel=size[KMG],high" is specified.
*/
crash_base = memblock_find_in_range(CRASH_ALIGN,
high ? CRASH_ADDR_HIGH_MAX
......
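
For reference, the "crashkernel=" parameter named in the new comment is
given on the kernel command line; the size below is illustrative only:

    crashkernel=256M        (search is capped at CRASH_ADDR_LOW_MAX, so old
                             kexec-tools can still load the bzImage there)
    crashkernel=256M,high   (search may go up to CRASH_ADDR_HIGH_MAX)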
......
@@ -259,7 +259,7 @@ static inline void __smp_reschedule_interrupt(void)
scheduler_ipi();
}
-__visible void smp_reschedule_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
ack_APIC_irq();
__smp_reschedule_interrupt();
......
@@ -268,7 +268,7 @@ __visible void smp_reschedule_interrupt(struct pt_regs *regs)
*/
}
-__visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_trace_reschedule_interrupt(struct pt_regs *regs)
{
/*
* Need to call irq_enter() before calling the trace point.
......
@@ -292,14 +292,15 @@ static inline void __smp_call_function_interrupt(void)
inc_irq_stat(irq_call_count);
}
-__visible void smp_call_function_interrupt(struct pt_regs *regs)
+__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
{
ipi_entering_ack_irq();
__smp_call_function_interrupt();
exiting_irq();
}
-__visible void smp_trace_call_function_interrupt(struct pt_regs *regs)
+__visible void __irq_entry
+smp_trace_call_function_interrupt(struct pt_regs *regs)
{
ipi_entering_ack_irq();
trace_call_function_entry(CALL_FUNCTION_VECTOR);
......
@@ -314,14 +315,16 @@ static inline void __smp_call_function_single_interrupt(void)
inc_irq_stat(irq_call_count);
}
-__visible void smp_call_function_single_interrupt(struct pt_regs *regs)
+__visible void __irq_entry
+smp_call_function_single_interrupt(struct pt_regs *regs)
{
ipi_entering_ack_irq();
__smp_call_function_single_interrupt();
exiting_irq();
}
-__visible void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
+__visible void __irq_entry
+smp_trace_call_function_single_interrupt(struct pt_regs *regs)
{
ipi_entering_ack_irq();
trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
......
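
For context, the __irq_entry annotation added throughout these hunks comes
from <linux/interrupt.h> (hence the new include in the irq_work hunk above);
it places each handler in the .irqentry.text section so the function-graph
and irqsoff tracers can recognize hard-interrupt entry points. A rough
paraphrase of the definition (the exact form in the tree may differ):

    /* approximate definition, see include/linux/interrupt.h */
    #define __irq_entry	__attribute__((__section__(".irqentry.text")))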