Commit 95c354fe authored by Nick Piggin, committed by Ingo Molnar

spinlock: lockbreak cleanup

The break_lock data structure and code for spinlocks is quite nasty.
Not only does it double the size of a spinlock but it changes locking to
a potentially less optimal trylock.

Put all of that under CONFIG_GENERIC_LOCKBREAK, and introduce a
__raw_spin_is_contended that uses the lock data itself to determine whether
there are waiters on the lock, to be used if CONFIG_GENERIC_LOCKBREAK is
not set.
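
As an illustration (not part of this patch), an architecture whose raw spinlock is a ticket lock can derive contention directly from the lock word. The layout, field widths, and type name below are hypothetical:

```c
/*
 * Hypothetical ticket-lock layout: the low byte holds the current
 * owner's ticket, the next byte holds the next ticket to hand out.
 */
typedef struct {
	unsigned int slock;
} my_raw_spinlock_t;

static inline int __raw_spin_is_contended(my_raw_spinlock_t *lock)
{
	unsigned int tmp   = *(volatile unsigned int *)&lock->slock;
	unsigned int owner = tmp & 0xff;
	unsigned int next  = (tmp >> 8) & 0xff;

	/* More than one ticket beyond the owner means someone is queued. */
	return ((next - owner) & 0xff) > 1;
}
```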

Rename need_lockbreak to spin_needbreak, make it use spin_is_contended to
decouple it from the spinlock implementation, and make it typesafe (rwlocks
do not have any need_lockbreak sites -- why do they even get bloated up
with that break_lock then?).
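
A minimal sketch of the resulting call-site pattern (hypothetical names, mirroring the jbd and mm conversions below): drop the lock as soon as a waiter shows up or a reschedule is pending, then re-take it.

```c
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_ctx {
	spinlock_t	 lock;
	struct list_head items;
};

static void my_handle_one(struct my_ctx *ctx);	/* hypothetical helper */

static void my_process_items(struct my_ctx *ctx)
{
	spin_lock(&ctx->lock);
	while (!list_empty(&ctx->items)) {
		my_handle_one(ctx);

		/* Give up the lock if it is contended or we should reschedule. */
		if (need_resched() || spin_needbreak(&ctx->lock)) {
			spin_unlock(&ctx->lock);
			cond_resched();
			spin_lock(&ctx->lock);
		}
	}
	spin_unlock(&ctx->lock);
}
```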
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Parent: a95d67f8
@@ -91,6 +91,11 @@ config GENERIC_IRQ_PROBE
 	bool
 	default y
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y
@@ -42,6 +42,11 @@ config MMU
 config SWIOTLB
 	bool
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
@@ -235,6 +235,11 @@ config IRAM_SIZE
 # Define implied options from the CPU selection here
 #
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 	depends on M32R
@@ -694,6 +694,11 @@ source "arch/mips/vr41xx/Kconfig"
 
 endmenu
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
 	default y
@@ -19,6 +19,11 @@ config MMU
 config STACK_GROWSUP
 	def_bool y
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	def_bool y
 
@@ -53,6 +53,11 @@ config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config ARCH_HAS_ILOG2_U32
 	bool
 	default y
@@ -200,6 +200,11 @@ config US2E_FREQ
 	  If in doubt, say N.
 
 # Global things across all Sun machines.
 
+config GENERIC_LOCKBREAK
+	bool
+	default y
+	depends on SMP && PREEMPT
+
 config RWSEM_GENERIC_SPINLOCK
 	bool
@@ -19,6 +19,10 @@ config X86_64
 config X86
 	def_bool y
 
+config GENERIC_LOCKBREAK
+	def_bool y
+	depends on SMP && PREEMPT
+
 config GENERIC_TIME
 	def_bool y
 
@@ -347,7 +347,8 @@ int log_do_checkpoint(journal_t *journal)
 				break;
 			}
 			retry = __process_buffer(journal, jh, bhs,&batch_count);
-			if (!retry && lock_need_resched(&journal->j_list_lock)){
+			if (!retry && (need_resched() ||
+				spin_needbreak(&journal->j_list_lock))) {
 				spin_unlock(&journal->j_list_lock);
 				retry = 1;
 				break;
@@ -265,7 +265,7 @@ static void journal_submit_data_buffers(journal_t *journal,
 			put_bh(bh);
 		}
 
-		if (lock_need_resched(&journal->j_list_lock)) {
+		if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
 			spin_unlock(&journal->j_list_lock);
 			goto write_out_data;
 		}
@@ -353,7 +353,8 @@ int jbd2_log_do_checkpoint(journal_t *journal)
 			}
 			retry = __process_buffer(journal, jh, bhs, &batch_count,
 						 transaction);
-			if (!retry && lock_need_resched(&journal->j_list_lock)){
+			if (!retry && (need_resched() ||
+				spin_needbreak(&journal->j_list_lock))) {
 				spin_unlock(&journal->j_list_lock);
 				retry = 1;
 				break;
@@ -341,7 +341,7 @@ static void journal_submit_data_buffers(journal_t *journal,
 			put_bh(bh);
 		}
 
-		if (lock_need_resched(&journal->j_list_lock)) {
+		if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
 			spin_unlock(&journal->j_list_lock);
 			goto write_out_data;
 		}
@@ -1922,23 +1922,16 @@ extern int cond_resched_softirq(void);
 
 /*
  * Does a critical section need to be broken due to another
- * task waiting?:
+ * task waiting?: (technically does not depend on CONFIG_PREEMPT,
+ * but a general need for low latency)
  */
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
-# define need_lockbreak(lock) ((lock)->break_lock)
-#else
-# define need_lockbreak(lock) 0
-#endif
-
-/*
- * Does a critical section need to be broken due to another
- * task waiting or preemption being signalled:
- */
-static inline int lock_need_resched(spinlock_t *lock)
+static inline int spin_needbreak(spinlock_t *lock)
 {
-	if (need_lockbreak(lock) || need_resched())
-		return 1;
+#ifdef CONFIG_PREEMPT
+	return spin_is_contended(lock);
+#else
 	return 0;
+#endif
 }
 
 /*
@@ -120,6 +120,12 @@ do { \
 
 #define spin_is_locked(lock)	__raw_spin_is_locked(&(lock)->raw_lock)
 
+#ifdef CONFIG_GENERIC_LOCKBREAK
+#define spin_is_contended(lock) ((lock)->break_lock)
+#else
+#define spin_is_contended(lock)	__raw_spin_is_contended(&(lock)->raw_lock)
+#endif
+
 /**
  * spin_unlock_wait - wait until the spinlock gets unlocked
  * @lock: the spinlock in question.
@@ -19,7 +19,7 @@
 
 typedef struct {
 	raw_spinlock_t raw_lock;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+#ifdef CONFIG_GENERIC_LOCKBREAK
 	unsigned int break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -35,7 +35,7 @@ typedef struct {
 
 typedef struct {
 	raw_rwlock_t raw_lock;
-#if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP)
+#ifdef CONFIG_GENERIC_LOCKBREAK
 	unsigned int break_lock;
 #endif
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -64,6 +64,8 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 # define __raw_spin_trylock(lock)	({ (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
+#define __raw_spin_is_contended(lock)	(((void)(lock), 0))
+
 #define __raw_read_can_lock(lock)	(((void)(lock), 1))
 #define __raw_write_can_lock(lock)	(((void)(lock), 1))
 
@@ -4945,19 +4945,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int cond_resched_lock(spinlock_t *lock)
 {
+	int resched = need_resched() && system_state == SYSTEM_RUNNING;
 	int ret = 0;
 
-	if (need_lockbreak(lock)) {
+	if (spin_needbreak(lock) || resched) {
 		spin_unlock(lock);
-		cpu_relax();
-		ret = 1;
-		spin_lock(lock);
-	}
-	if (need_resched() && system_state == SYSTEM_RUNNING) {
-		spin_release(&lock->dep_map, 1, _THIS_IP_);
-		_raw_spin_unlock(lock);
-		preempt_enable_no_resched();
-		__cond_resched();
+		if (resched && need_resched())
+			__cond_resched();
+		else
+			cpu_relax();
 		ret = 1;
 		spin_lock(lock);
 	}
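For comparison, a typical caller of cond_resched_lock() stays unchanged; the rewrite above simply covers both the "lock is contended" and "reschedule needed" cases in one call. A rough, hypothetical example:

```c
#include <linux/sched.h>
#include <linux/spinlock.h>

struct my_table {
	spinlock_t lock;
	int	   nr_entries;
};

static void my_examine_entry(struct my_table *t, int i);	/* hypothetical helper */

/* Hypothetical long-running scan that holds t->lock across iterations. */
static void my_scan_table(struct my_table *t)
{
	int i;

	spin_lock(&t->lock);
	for (i = 0; i < t->nr_entries; i++) {
		my_examine_entry(t, i);

		/* Briefly drops t->lock if it is contended or a resched is due. */
		cond_resched_lock(&t->lock);
	}
	spin_unlock(&t->lock);
}
```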
@@ -65,8 +65,7 @@ EXPORT_SYMBOL(_write_trylock);
  * even on CONFIG_PREEMPT, because lockdep assumes that interrupts are
  * not re-enabled during lock-acquire (which the preempt-spin-ops do):
  */
-#if !defined(CONFIG_PREEMPT) || !defined(CONFIG_SMP) || \
-	defined(CONFIG_DEBUG_LOCK_ALLOC)
+#if !defined(CONFIG_GENERIC_LOCKBREAK) || defined(CONFIG_DEBUG_LOCK_ALLOC)
 
 void __lockfunc _read_lock(rwlock_t *lock)
 {
@@ -513,8 +513,7 @@ static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 		if (progress >= 32) {
 			progress = 0;
 			if (need_resched() ||
-			    need_lockbreak(src_ptl) ||
-			    need_lockbreak(dst_ptl))
+			    spin_needbreak(src_ptl) || spin_needbreak(dst_ptl))
 				break;
 		}
 		if (pte_none(*src_pte)) {
@@ -853,7 +852,7 @@ unsigned long unmap_vmas(struct mmu_gather **tlbp,
 			tlb_finish_mmu(*tlbp, tlb_start, start);
 
 			if (need_resched() ||
-				(i_mmap_lock && need_lockbreak(i_mmap_lock))) {
+				(i_mmap_lock && spin_needbreak(i_mmap_lock))) {
 				if (i_mmap_lock) {
 					*tlbp = NULL;
 					goto out;
@@ -1768,8 +1767,7 @@ static int unmap_mapping_range_vma(struct vm_area_struct *vma,
 
 	restart_addr = zap_page_range(vma, start_addr,
 					end_addr - start_addr, details);
-	need_break = need_resched() ||
-			need_lockbreak(details->i_mmap_lock);
+	need_break = need_resched() || spin_needbreak(details->i_mmap_lock);
 
 	if (restart_addr >= end_addr) {
 		/* We have now completed this vma: mark it so */