提交 288a60cf 编写于 作者: Chris Zankel 提交者: Linus Torvalds

[PATCH] xtensa: remove io_remap_page_range and minor clean-ups

Remove io_remap_page_range() from all of Linux 2.6.x (as requested and
suggested by Randy Dunlap) and minor clean-ups.
Signed-off-by: Chris Zankel <chris@zankel.net>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
上级 fac97ae0
...@@ -402,8 +402,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, ...@@ -402,8 +402,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
__pci_mmap_set_flags(dev, vma, mmap_state); __pci_mmap_set_flags(dev, vma, mmap_state);
__pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine); __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
ret = io_remap_page_range(vma, vma->vm_start, vma->vm_pgoff<<PAGE_SHIFT, ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
vma->vm_end - vma->vm_start, vma->vm_page_prot); vma->vm_end - vma->vm_start,vma->vm_page_prot);
return ret; return ret;
} }
......
...@@ -39,7 +39,7 @@ _F(int, pcibios_fixup, (void), { return 0; }); ...@@ -39,7 +39,7 @@ _F(int, pcibios_fixup, (void), { return 0; });
_F(int, get_rtc_time, (time_t* t), { return 0; }); _F(int, get_rtc_time, (time_t* t), { return 0; });
_F(int, set_rtc_time, (time_t t), { return 0; }); _F(int, set_rtc_time, (time_t t), { return 0; });
#if CONFIG_XTENSA_CALIBRATE_CCOUNT #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
_F(void, calibrate_ccount, (void), _F(void, calibrate_ccount, (void),
{ {
printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n"); printk ("ERROR: Cannot calibrate cpu frequency! Assuming 100MHz.\n");
......
...@@ -457,7 +457,7 @@ int ...@@ -457,7 +457,7 @@ int
dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r) dump_task_fpu(struct pt_regs *regs, struct task_struct *task, elf_fpregset_t *r)
{ {
/* see asm/coprocessor.h for this magic number 16 */ /* see asm/coprocessor.h for this magic number 16 */
#if TOTAL_CPEXTRA_SIZE > 16 #if XTENSA_CP_EXTRA_SIZE > 16
do_save_fpregs (r, regs, task); do_save_fpregs (r, regs, task);
/* For now, bit 16 means some extra state may be present: */ /* For now, bit 16 means some extra state may be present: */
......
...@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p) ...@@ -304,7 +304,7 @@ void __init setup_arch(char **cmdline_p)
# endif # endif
#endif #endif
#if CONFIG_PCI #ifdef CONFIG_PCI
platform_pcibios_init(); platform_pcibios_init();
#endif #endif
} }
......
...@@ -182,7 +182,7 @@ restore_cpextra (struct _cpstate *buf) ...@@ -182,7 +182,7 @@ restore_cpextra (struct _cpstate *buf)
struct task_struct *tsk = current; struct task_struct *tsk = current;
release_all_cp(tsk); release_all_cp(tsk);
return __copy_from_user(tsk->thread.cpextra, buf, TOTAL_CPEXTRA_SIZE); return __copy_from_user(tsk->thread.cpextra, buf, XTENSA_CP_EXTRA_SIZE);
#endif #endif
return 0; return 0;
} }
......
...@@ -68,7 +68,7 @@ void __init time_init(void) ...@@ -68,7 +68,7 @@ void __init time_init(void)
* speed for the CALIBRATE. * speed for the CALIBRATE.
*/ */
#if CONFIG_XTENSA_CALIBRATE_CCOUNT #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
printk("Calibrating CPU frequency "); printk("Calibrating CPU frequency ");
platform_calibrate_ccount(); platform_calibrate_ccount();
printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ), printk("%d.%02d MHz\n", (int)ccount_per_jiffy/(1000000/HZ),
......
...@@ -239,7 +239,7 @@ void __init mem_init(void) ...@@ -239,7 +239,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_mapnr << PAGE_SHIFT); high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
highmemsize = 0; highmemsize = 0;
#if CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
#error HIGHGMEM not implemented in init.c #error HIGHGMEM not implemented in init.c
#endif #endif
......
...@@ -22,7 +22,7 @@ typedef struct { volatile int counter; } atomic_t; ...@@ -22,7 +22,7 @@ typedef struct { volatile int counter; } atomic_t;
#include <asm/processor.h> #include <asm/processor.h>
#include <asm/system.h> #include <asm/system.h>
#define ATOMIC_INIT(i) ( (atomic_t) { (i) } ) #define ATOMIC_INIT(i) { (i) }
/* /*
* This Xtensa implementation assumes that the right mechanism * This Xtensa implementation assumes that the right mechanism
......
...@@ -174,7 +174,7 @@ static __inline__ int test_bit(int nr, const volatile void *addr) ...@@ -174,7 +174,7 @@ static __inline__ int test_bit(int nr, const volatile void *addr)
return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31)); return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31));
} }
#if XCHAL_HAVE_NSAU #if XCHAL_HAVE_NSA
static __inline__ int __cntlz (unsigned long x) static __inline__ int __cntlz (unsigned long x)
{ {
......
...@@ -23,6 +23,7 @@ typedef struct { ...@@ -23,6 +23,7 @@ typedef struct {
unsigned int __nmi_count; /* arch dependent */ unsigned int __nmi_count; /* arch dependent */
} ____cacheline_aligned irq_cpustat_t; } ____cacheline_aligned irq_cpustat_t;
void ack_bad_irq(unsigned int irq);
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ #include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#endif /* _XTENSA_HARDIRQ_H */ #endif /* _XTENSA_HARDIRQ_H */
...@@ -20,28 +20,19 @@ struct semaphore { ...@@ -20,28 +20,19 @@ struct semaphore {
atomic_t count; atomic_t count;
int sleepers; int sleepers;
wait_queue_head_t wait; wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
long __magic;
#endif
}; };
#if WAITQUEUE_DEBUG #define __SEMAPHORE_INITIALIZER(name,n) \
# define __SEM_DEBUG_INIT(name) \ { \
, (int)&(name).__magic .count = ATOMIC_INIT(n), \
#else .sleepers = 0, \
# define __SEM_DEBUG_INIT(name) .wait = __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
#endif }
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), \
0, \
__WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
__SEM_DEBUG_INIT(name) }
#define __MUTEX_INITIALIZER(name) \ #define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INITIALIZER(name, 1) __SEMAPHORE_INITIALIZER(name, 1)
#define __DECLARE_SEMAPHORE_GENERIC(name,count) \ #define __DECLARE_SEMAPHORE_GENERIC(name,count) \
struct semaphore name = __SEMAPHORE_INITIALIZER(name,count) struct semaphore name = __SEMAPHORE_INITIALIZER(name,count)
#define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1) #define DECLARE_MUTEX(name) __DECLARE_SEMAPHORE_GENERIC(name,1)
...@@ -49,17 +40,8 @@ struct semaphore { ...@@ -49,17 +40,8 @@ struct semaphore {
static inline void sema_init (struct semaphore *sem, int val) static inline void sema_init (struct semaphore *sem, int val)
{ {
/*
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
*
* i'd rather use the more flexible initialization above, but sadly
* GCC 2.7.2.3 emits a bogus warning. EGCS doesnt. Oh well.
*/
atomic_set(&sem->count, val); atomic_set(&sem->count, val);
init_waitqueue_head(&sem->wait); init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
sem->__magic = (int)&sem->__magic;
#endif
} }
static inline void init_MUTEX (struct semaphore *sem) static inline void init_MUTEX (struct semaphore *sem)
...@@ -81,9 +63,7 @@ extern spinlock_t semaphore_wake_lock; ...@@ -81,9 +63,7 @@ extern spinlock_t semaphore_wake_lock;
static inline void down(struct semaphore * sem) static inline void down(struct semaphore * sem)
{ {
#if WAITQUEUE_DEBUG might_sleep();
CHECK_MAGIC(sem->__magic);
#endif
if (atomic_sub_return(1, &sem->count) < 0) if (atomic_sub_return(1, &sem->count) < 0)
__down(sem); __down(sem);
...@@ -92,9 +72,8 @@ static inline void down(struct semaphore * sem) ...@@ -92,9 +72,8 @@ static inline void down(struct semaphore * sem)
static inline int down_interruptible(struct semaphore * sem) static inline int down_interruptible(struct semaphore * sem)
{ {
int ret = 0; int ret = 0;
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic); might_sleep();
#endif
if (atomic_sub_return(1, &sem->count) < 0) if (atomic_sub_return(1, &sem->count) < 0)
ret = __down_interruptible(sem); ret = __down_interruptible(sem);
...@@ -104,9 +83,6 @@ static inline int down_interruptible(struct semaphore * sem) ...@@ -104,9 +83,6 @@ static inline int down_interruptible(struct semaphore * sem)
static inline int down_trylock(struct semaphore * sem) static inline int down_trylock(struct semaphore * sem)
{ {
int ret = 0; int ret = 0;
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
if (atomic_sub_return(1, &sem->count) < 0) if (atomic_sub_return(1, &sem->count) < 0)
ret = __down_trylock(sem); ret = __down_trylock(sem);
...@@ -119,9 +95,6 @@ static inline int down_trylock(struct semaphore * sem) ...@@ -119,9 +95,6 @@ static inline int down_trylock(struct semaphore * sem)
*/ */
static inline void up(struct semaphore * sem) static inline void up(struct semaphore * sem)
{ {
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
if (atomic_add_return(1, &sem->count) <= 0) if (atomic_add_return(1, &sem->count) <= 0)
__up(sem); __up(sem);
} }
......
...@@ -189,20 +189,6 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val) ...@@ -189,20 +189,6 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
#define tas(ptr) (xchg((ptr),1)) #define tas(ptr) (xchg((ptr),1))
#if ( __XCC__ == 1 )
/* xt-xcc processes __inline__ differently than xt-gcc and decides to
* insert an out-of-line copy of function __xchg. This presents the
* unresolved symbol at link time of __xchg_called_with_bad_pointer,
* even though such a function would never be called at run-time.
* xt-gcc always inlines __xchg, and optimizes away the undefined
* bad_pointer function.
*/
#define xchg(ptr,x) xchg_u32(ptr,x)
#else /* assume xt-gcc */
#define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)))) #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
/* /*
...@@ -224,8 +210,6 @@ __xchg(unsigned long x, volatile void * ptr, int size) ...@@ -224,8 +210,6 @@ __xchg(unsigned long x, volatile void * ptr, int size)
return x; return x;
} }
#endif
extern void set_except_vector(int n, void *addr); extern void set_except_vector(int n, void *addr);
static inline void spill_registers(void) static inline void spill_registers(void)
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册