openeuler / Kernel
commit 3d150630
Author: Mike Frysinger
Date:   Jun 13, 2009
Blackfin: convert locking primitives to asm-generic
Signed-off-by: Mike Frysinger <vapier@gentoo.org>

Parent: 22a151c1
6 changed files with 25 additions and 308 deletions (+25, -308)
arch/blackfin/include/asm/atomic.h      +7   -102
arch/blackfin/include/asm/bitops.h      +9   -189
arch/blackfin/include/asm/mutex.h       +1   -1
arch/blackfin/include/asm/spinlock.h    +6   -0
arch/blackfin/include/asm/swab.h        +1   -5
arch/blackfin/include/asm/unaligned.h   +1   -11
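The conversion has one shape in the locking headers (atomic.h, bitops.h, mutex.h, spinlock.h): uniprocessor builds drop the hand-written Blackfin code in favor of the asm-generic implementation, and the Blackfin-specific assembly survives only under CONFIG_SMP; swab.h and unaligned.h switch to asm-generic outright. A minimal sketch of the SMP/UP split (the include shown is the real one from the atomic.h hunk below; the comments are illustrative):

#ifndef CONFIG_SMP
# include <asm-generic/atomic.h>        /* generic implementation, safe on UP */
#else
/* Blackfin SMP-specific primitives (assembly helpers) remain here */
#endif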
arch/blackfin/include/asm/atomic.h
#ifndef __ARCH_BLACKFIN_ATOMIC__
#define __ARCH_BLACKFIN_ATOMIC__
#ifndef CONFIG_SMP
# include <asm-generic/atomic.h>
#else
#include <linux/types.h>
#include <asm/system.h>        /* local_irq_XXX() */
/*
* Atomic operations that C can't guarantee us. Useful for
* resource counting etc..
*
* Generally we do not concern about SMP BFIN systems, so we don't have
* to deal with that.
*
* Tony Kou (tonyko@lineo.ca) Lineo Inc. 2001
*/
#define ATOMIC_INIT(i) { (i) }
#define atomic_set(v, i) (((v)->counter) = i)
#ifdef CONFIG_SMP
#define atomic_read(v) __raw_uncached_fetch_asm(&(v)->counter)
asmlinkage int __raw_uncached_fetch_asm(const volatile int *ptr);
...
@@ -84,100 +81,6 @@ static inline int atomic_test_mask(int mask, atomic_t *v)
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#else /* !CONFIG_SMP */

#define atomic_read(v)	((v)->counter)

static inline void atomic_add(int i, atomic_t *v)
{
        unsigned long flags;

        local_irq_save_hw(flags);
        v->counter += i;
        local_irq_restore_hw(flags);
}

static inline void atomic_sub(int i, atomic_t *v)
{
        unsigned long flags;

        local_irq_save_hw(flags);
        v->counter -= i;
        local_irq_restore_hw(flags);
}

static inline int atomic_add_return(int i, atomic_t *v)
{
        int __temp = 0;
        unsigned long flags;

        local_irq_save_hw(flags);
        v->counter += i;
        __temp = v->counter;
        local_irq_restore_hw(flags);

        return __temp;
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
        int __temp = 0;
        unsigned long flags;

        local_irq_save_hw(flags);
        v->counter -= i;
        __temp = v->counter;
        local_irq_restore_hw(flags);

        return __temp;
}

static inline void atomic_inc(volatile atomic_t *v)
{
        unsigned long flags;

        local_irq_save_hw(flags);
        v->counter++;
        local_irq_restore_hw(flags);
}

static inline void atomic_dec(volatile atomic_t *v)
{
        unsigned long flags;

        local_irq_save_hw(flags);
        v->counter--;
        local_irq_restore_hw(flags);
}

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        unsigned long flags;

        local_irq_save_hw(flags);
        v->counter &= ~mask;
        local_irq_restore_hw(flags);
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
        unsigned long flags;

        local_irq_save_hw(flags);
        v->counter |= mask;
        local_irq_restore_hw(flags);
}

/* Atomic operations are already serializing */
#define smp_mb__before_atomic_dec()	barrier()
#define smp_mb__after_atomic_dec()	barrier()
#define smp_mb__before_atomic_inc()	barrier()
#define smp_mb__after_atomic_inc()	barrier()

#endif /* !CONFIG_SMP */

#define atomic_add_negative(a, v)	(atomic_add_return((a), (v)) < 0)
#define atomic_dec_return(v)	atomic_sub_return(1,(v))
#define atomic_inc_return(v)	atomic_add_return(1,(v))
...
@@ -210,4 +113,6 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
#include <asm-generic/atomic-long.h>
#endif

#endif	/* __ARCH_BLACKFIN_ATOMIC __ */
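All of the removed UP variants follow one pattern: disable local interrupts around the read-modify-write, which on a uniprocessor is enough to make the sequence atomic, and the asm-generic/atomic.h code that replaces them relies on the same strategy. A minimal sketch in kernel context (atomic_t and local_irq_save()/local_irq_restore() are the stock kernel type and helpers; sketch_atomic_add_return is a hypothetical name):

static inline int sketch_atomic_add_return(int i, atomic_t *v)
{
        unsigned long flags;
        int ret;

        local_irq_save(flags);          /* no interrupt can split the RMW on UP */
        v->counter += i;
        ret = v->counter;
        local_irq_restore(flags);

        return ret;
}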
arch/blackfin/include/asm/bitops.h
#ifndef _BLACKFIN_BITOPS_H
#define _BLACKFIN_BITOPS_H
/*
* Copyright 1992, Linus Torvalds.
*/
#include <linux/compiler.h>
#include <asm/byteorder.h>        /* swab32 */
#ifdef __KERNEL__
#ifndef CONFIG_SMP
# include <asm-generic/bitops.h>
#else
#ifndef _LINUX_BITOPS_H
#error only <linux/bitops.h> can be included directly
#endif
#include <linux/compiler.h>
#include <asm/byteorder.h>        /* swab32 */
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/ffz.h>
#ifdef CONFIG_SMP
#include <linux/linkage.h>
#include <linux/linkage.h>

asmlinkage int __raw_bit_set_asm(volatile unsigned long *addr, int nr);
...
@@ -79,189 +75,13 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
        return __raw_bit_test_toggle_asm(a, nr & 0x1f);
}

#else /* !CONFIG_SMP */

#include <asm/system.h>        /* save_flags */
static inline void set_bit(int nr, volatile unsigned long *addr)
{
        int *a = (int *)addr;
        int mask;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save_hw(flags);
        *a |= mask;
        local_irq_restore_hw(flags);
}

static inline void clear_bit(int nr, volatile unsigned long *addr)
{
        int *a = (int *)addr;
        int mask;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save_hw(flags);
        *a &= ~mask;
        local_irq_restore_hw(flags);
}

static inline void change_bit(int nr, volatile unsigned long *addr)
{
        int mask;
        unsigned long flags;
        unsigned long *ADDR = (unsigned long *)addr;

        ADDR += nr >> 5;
        mask = 1 << (nr & 31);
        local_irq_save_hw(flags);
        *ADDR ^= mask;
        local_irq_restore_hw(flags);
}

static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save_hw(flags);
        retval = (mask & *a) != 0;
        *a |= mask;
        local_irq_restore_hw(flags);

        return retval;
}

static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save_hw(flags);
        retval = (mask & *a) != 0;
        *a &= ~mask;
        local_irq_restore_hw(flags);

        return retval;
}

static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;
        unsigned long flags;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        local_irq_save_hw(flags);
        retval = (mask & *a) != 0;
        *a ^= mask;
        local_irq_restore_hw(flags);

        return retval;
}

#endif /* CONFIG_SMP */

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	barrier()
#define smp_mb__after_clear_bit()	barrier()
static inline void __set_bit(int nr, volatile unsigned long *addr)
{
        int *a = (int *)addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a |= mask;
}

static inline void __clear_bit(int nr, volatile unsigned long *addr)
{
        int *a = (int *)addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        *a &= ~mask;
}

static inline void __change_bit(int nr, volatile unsigned long *addr)
{
        int mask;
        unsigned long *ADDR = (unsigned long *)addr;

        ADDR += nr >> 5;
        mask = 1 << (nr & 31);
        *ADDR ^= mask;
}

static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a |= mask;

        return retval;
}

static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a &= ~mask;

        return retval;
}

static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
{
        int mask, retval;
        volatile unsigned int *a = (volatile unsigned int *)addr;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        retval = (mask & *a) != 0;
        *a ^= mask;

        return retval;
}

static inline int __test_bit(int nr, const void *addr)
{
        int *a = (int *)addr;
        int mask;

        a += nr >> 5;
        mask = 1 << (nr & 0x1f);
        return ((mask & *a) != 0);
}

#ifndef CONFIG_SMP
/*
 * This routine doesn't need irq save and restore ops in UP
 * context.
 */
static inline int test_bit(int nr, const void *addr)
{
        return __test_bit(nr, addr);
}
#endif
#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/find.h>
#include <asm-generic/bitops/hweight.h>
...
@@ -272,10 +92,10 @@ static inline int test_bit(int nr, const void *addr)
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /* CONFIG_SMP */

#endif /* _BLACKFIN_BITOPS_H */
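Every helper above locates its bit the same way: with 32-bit words, bit nr lives in word nr >> 5 under mask 1 << (nr & 0x1f), i.e. nr / 32 and nr % 32. A standalone illustration (sketch_test_bit is a hypothetical name; the kernel's real test_bit has the same logic):

static inline int sketch_test_bit(int nr, const unsigned long *addr)
{
        /* e.g. nr = 70: word index 70 >> 5 = 2, bit 70 & 0x1f = 6 */
        return (addr[nr >> 5] >> (nr & 0x1f)) & 1;
}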
arch/blackfin/include/asm/mutex.h
...
@@ -10,7 +10,7 @@
#define _ASM_MUTEX_H

#ifndef CONFIG_SMP
-#include <asm-generic/mutex-dec.h>
+#include <asm-generic/mutex.h>
#else

static inline void
...
arch/blackfin/include/asm/spinlock.h
#ifndef __BFIN_SPINLOCK_H
#define __BFIN_SPINLOCK_H
#ifndef CONFIG_SMP
# include <asm-generic/spinlock.h>
#else
#include <asm/atomic.h>
asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
...
@@ -86,4 +90,6 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw)
#define _raw_read_relax(lock) cpu_relax()
#define _raw_write_relax(lock) cpu_relax()
#endif

#endif	/* !__BFIN_SPINLOCK_H */
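On SMP the spinlock ops remain Blackfin assembly helpers such as __raw_spin_is_locked_asm(). Functionally they implement a test-and-set lock; a hypothetical portable sketch of the same idea using GCC's __sync builtins (none of these names are the kernel's):

typedef struct { volatile int lock; } sketch_spinlock_t;

static inline void sketch_spin_lock(sketch_spinlock_t *s)
{
        /* atomically set lock = 1 and fetch the old value; 0 means we won */
        while (__sync_lock_test_and_set(&s->lock, 1))
                while (s->lock)
                        ;       /* spin on a plain read until it looks free */
}

static inline void sketch_spin_unlock(sketch_spinlock_t *s)
{
        __sync_lock_release(&s->lock);  /* release-store of 0 */
}

static inline int sketch_spin_is_locked(sketch_spinlock_t *s)
{
        return s->lock != 0;
}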
arch/blackfin/include/asm/swab.h
...
@@ -2,11 +2,7 @@
#define _BLACKFIN_SWAB_H

-#include <linux/types.h>
-#include <linux/compiler.h>
-#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
-# define __SWAB_64_THRU_32__
-#endif
+#include <asm-generic/swab.h>
#ifdef __GNUC__
...
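Whether defined here or via asm-generic/swab.h, __SWAB_64_THRU_32__ tells the generic byte-swap code to build the 64-bit swap out of two 32-bit swaps, the cheap route on a 32-bit core like Blackfin. The idea, sketched with the kernel's __swab32() (sketch_swab64 is a hypothetical name):

static inline __u64 sketch_swab64(__u64 x)
{
        __u32 hi = (__u32)(x >> 32);
        __u32 lo = (__u32)x;

        /* byte-swap each half, then exchange the halves */
        return ((__u64)__swab32(lo) << 32) | __swab32(hi);
}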
arch/blackfin/include/asm/unaligned.h
#ifndef _ASM_BLACKFIN_UNALIGNED_H
#define _ASM_BLACKFIN_UNALIGNED_H

-#include <linux/unaligned/le_struct.h>
-#include <linux/unaligned/be_byteshift.h>
-#include <linux/unaligned/generic.h>
-
-#define get_unaligned	__get_unaligned_le
-#define put_unaligned	__put_unaligned_le
+#include <asm-generic/unaligned.h>

#endif	/* _ASM_BLACKFIN_UNALIGNED_H */
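The linux/unaligned/le_struct.h helpers that used to be pulled in here (and that asm-generic/unaligned.h still selects for little-endian kernels) avoid unaligned loads by reading through a packed struct, letting the compiler emit whatever accesses the core can do safely. Roughly (both names below are hypothetical; in kernel context u32 comes from linux/types.h):

struct sketch_una_u32 { u32 x; } __attribute__((packed));

static inline u32 sketch_get_unaligned_le32(const void *p)
{
        /* packed access: the compiler may not assume 4-byte alignment */
        return ((const struct sketch_una_u32 *)p)->x;   /* native LE order on Blackfin */
}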