Commit eefbab59 authored by Linus Torvalds

Merge branch 'frv' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-frv

* 'frv' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-2.6-frv:
  FRV: Use generic show_interrupts()
  FRV: Convert genirq namespace
  frv: Select GENERIC_HARDIRQS_NO_DEPRECATED
  frv: Convert cpu irq_chip to new functions
  frv: Convert mb93493 irq_chip to new functions
  frv: Convert mb93093 irq_chip to new function
  frv: Convert mb93091 irq_chip to new functions
  frv: Fix typo from __do_IRQ overhaul
  frv: Remove stale irq_chip.end
  FRV: Do some cleanups
  FRV: Missing node arg in alloc_thread_info_node() macro
  NOMMU: implement access_remote_vm
  NOMMU: support SMP dynamic percpu_alloc
  NOMMU: percpu should use is_vmalloc_addr().
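Most of the FRV patches in this pull convert the on-board irq_chips from the deprecated handlers, which took a bare IRQ number, to the struct irq_data based callbacks that selecting GENERIC_HARDIRQS_NO_DEPRECATED requires. A minimal sketch of the shape of that conversion (the chip name, register helper and IRQ base below are made up for illustration; this is not the actual FRV code):

/* Old style: the callback receives a raw IRQ number. */
static void demo_mask(unsigned int irq)
{
	__set_DEMO_MASK(1 << (irq - IRQ_BASE_DEMO));	/* hypothetical register helper */
}

/* New style: the callback receives a struct irq_data and takes the IRQ
 * number (and any per-chip data) from it. */
static void demo_irq_mask(struct irq_data *d)
{
	__set_DEMO_MASK(1 << (d->irq - IRQ_BASE_DEMO));
}

static struct irq_chip demo_chip = {
	.name		= "demo",
	.irq_mask	= demo_irq_mask,	/* was .mask = demo_mask */
};

The frv_fpga_ack() hunk further down is an instance of the same idea: the handler takes a struct irq_data, so it must use d->irq rather than a bare irq variable.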
@@ -363,7 +363,6 @@ menu "Power management options"
 config ARCH_SUSPEND_POSSIBLE
 	def_bool y
-	depends on !SMP
 source kernel/power/Kconfig
 endmenu
...
@@ -45,21 +45,12 @@ do { \
 #define wmb()			asm volatile ("membar" : : :"memory")
 #define read_barrier_depends()	do { } while (0)
-#ifdef CONFIG_SMP
-#define smp_mb()			mb()
-#define smp_rmb()			rmb()
-#define smp_wmb()			wmb()
-#define smp_read_barrier_depends()	read_barrier_depends()
-#define set_mb(var, value) \
-	do { xchg(&var, (value)); } while (0)
-#else
 #define smp_mb()			barrier()
 #define smp_rmb()			barrier()
 #define smp_wmb()			barrier()
 #define smp_read_barrier_depends()	do {} while(0)
 #define set_mb(var, value) \
	do { var = (value); barrier(); } while (0)
-#endif
 extern void die_if_kernel(const char *, ...) __attribute__((format(printf, 1, 2)));
 extern void free_initmem(void);
...
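For context on the hunk above: FRV cannot be built SMP, so the CONFIG_SMP branch was dead code, and the remaining smp_* definitions only need to stop the compiler from reordering accesses. barrier() is the usual GCC compiler barrier, roughly as defined in include/linux/compiler-gcc.h:

/* An empty asm with a "memory" clobber: emits no instruction, but the
 * compiler may not cache memory values across it or reorder memory
 * accesses around it. */
#define barrier() __asm__ __volatile__("" : : : "memory")

Hence set_mb() can drop the xchg() and become a plain store followed by a compiler barrier.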
@@ -21,6 +21,8 @@
 #define THREAD_SIZE		8192
+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
 /*
  * low level task data that entry.S needs immediate access to
  * - this struct should fit entirely inside of one cache line
@@ -87,7 +89,7 @@ register struct thread_info *__current_thread_info asm("gr15");
 #define alloc_thread_info_node(tsk, node) \
	kzalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #else
-#define alloc_thread_info_node(tsk) \
+#define alloc_thread_info_node(tsk, node) \
	kmalloc_node(THREAD_SIZE, GFP_KERNEL, node)
 #endif
...
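The alloc_thread_info_node() fix above matters because the generic code in kernel/fork.c expands the macro with two arguments, and the macro body itself references `node`; with only `(tsk)` in the parameter list the expansion failed to compile. The caller looks roughly like this (a simplified sketch of the 2.6.39-era dup_task_struct(), error handling trimmed):

static struct task_struct *dup_task_struct(struct task_struct *orig)
{
	struct task_struct *tsk;
	struct thread_info *ti;
	int node = tsk_fork_get_node(orig);

	tsk = alloc_task_struct_node(node);
	if (!tsk)
		return NULL;

	/* Expands the per-arch macro: it must accept both `tsk` and
	 * `node`, which is what the FRV change above restores. */
	ti = alloc_thread_info_node(tsk, node);
	if (!ti) {
		free_task_struct(tsk);
		return NULL;
	}

	/* ... copy *orig, set up tsk->stack, usage counters, etc. ... */
	return tsk;
}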
@@ -47,7 +47,7 @@ static void frv_fpga_mask(struct irq_data *d)
 static void frv_fpga_ack(struct irq_data *d)
 {
-	__clr_IFR(1 << (irq - IRQ_BASE_FPGA));
+	__clr_IFR(1 << (d->irq - IRQ_BASE_FPGA));
 }
 static void frv_fpga_mask_ack(struct irq_data *d)
...
@@ -95,10 +95,27 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
			struct page ***pages);
+#ifdef CONFIG_MMU
 extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
 extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
+#else
+static inline int
+map_kernel_range_noflush(unsigned long start, unsigned long size,
+			pgprot_t prot, struct page **pages)
+{
+	return size >> PAGE_SHIFT;
+}
+static inline void
+unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
+{
+}
+static inline void
+unmap_kernel_range(unsigned long addr, unsigned long size)
+{
+}
+#endif
 /* Allocate/destroy a 'vmalloc' VM area. */
 extern struct vm_struct *alloc_vm_area(size_t size);
@@ -116,11 +133,26 @@ extern struct vm_struct *vmlist;
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 #ifdef CONFIG_SMP
+# ifdef CONFIG_MMU
 struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);
 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
+# else
+static inline struct vm_struct **
+pcpu_get_vm_areas(const unsigned long *offsets,
+		const size_t *sizes, int nr_vms,
+		size_t align)
+{
+	return NULL;
+}
+static inline void
+pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
+{
+}
+# endif
 #endif
 #endif	/* _LINUX_VMALLOC_H */
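The new !MMU stubs above exist so that generic callers of the vmap machinery, in particular the dynamic percpu allocator, compile unchanged on nommu kernels, where memory is identity-mapped and there are no kernel page tables to populate. A hypothetical caller sketch, only to illustrate why the stub reports size >> PAGE_SHIFT ("all pages mapped") rather than an error:

/* Hypothetical helper, not from the patch: map `nr` pages at `addr`
 * without flushing caches/TLBs. On MMU kernels this fills in page
 * tables; on !MMU the stub claims every page was mapped, so callers
 * like this proceed without special-casing nommu. */
static int map_chunk_pages(unsigned long addr, struct page **pages, int nr)
{
	int mapped = map_kernel_range_noflush(addr,
					      (unsigned long)nr << PAGE_SHIFT,
					      PAGE_KERNEL, pages);

	return mapped == nr ? 0 : -ENOMEM;
}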
@@ -1971,21 +1971,10 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 }
 EXPORT_SYMBOL(filemap_fault);
-/*
- * Access another process' address space.
- * - source/target buffer must be kernel space
- */
-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
+		unsigned long addr, void *buf, int len, int write)
 {
 	struct vm_area_struct *vma;
-	struct mm_struct *mm;
-	if (addr + len < addr)
-		return 0;
-	mm = get_task_mm(tsk);
-	if (!mm)
-		return 0;
 	down_read(&mm->mmap_sem);
@@ -2010,6 +1999,43 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
 	}
 	up_read(&mm->mmap_sem);
+	return len;
+}
+
+/**
+ * @access_remote_vm - access another process' address space
+ * @mm:	the mm_struct of the target address space
+ * @addr:	start address to access
+ * @buf:	source or destination buffer
+ * @len:	number of bytes to transfer
+ * @write:	whether the access is a write
+ *
+ * The caller must hold a reference on @mm.
+ */
+int access_remote_vm(struct mm_struct *mm, unsigned long addr,
+		void *buf, int len, int write)
+{
+	return __access_remote_vm(NULL, mm, addr, buf, len, write);
+}
+
+/*
+ * Access another process' address space.
+ * - source/target buffer must be kernel space
+ */
+int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
+{
+	struct mm_struct *mm;
+
+	if (addr + len < addr)
+		return 0;
+
+	mm = get_task_mm(tsk);
+	if (!mm)
+		return 0;
+
+	len = __access_remote_vm(tsk, mm, addr, buf, len, write);
+
 	mmput(mm);
 	return len;
 }
...
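The nommu hunk above splits the old access_process_vm() body into __access_remote_vm(), and the new access_remote_vm() entry point lets a caller that already holds a reference on the target mm access that address space without going back through a task pointer. A short usage sketch (hypothetical caller, not part of the patch):

/* Hypothetical caller: read `count` bytes from a foreign address space
 * into a kernel buffer. The caller must already hold a reference on
 * `mm`, e.g. one obtained with get_task_mm() and later dropped with
 * mmput(). */
static int peek_remote(struct mm_struct *mm, unsigned long addr,
		       void *kbuf, int count)
{
	/* write == 0: copy from the remote address space into kbuf;
	 * the return value is the number of bytes actually copied. */
	return access_remote_vm(mm, addr, kbuf, count, 0);
}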
@@ -1008,8 +1008,7 @@ phys_addr_t per_cpu_ptr_to_phys(void *addr)
 	}
 	if (in_first_chunk) {
-		if ((unsigned long)addr < VMALLOC_START ||
-		    (unsigned long)addr >= VMALLOC_END)
+		if (!is_vmalloc_addr(addr))
			return __pa(addr);
 		else
			return page_to_phys(vmalloc_to_page(addr));
...
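The percpu.c change above swaps an open-coded VMALLOC_START/VMALLOC_END range check for is_vmalloc_addr(), which reads better and also works on !MMU kernels, where those constants do not exist. For reference, the generic helper in include/linux/mm.h looks roughly like this:

/* Roughly the generic definition: on !MMU there is no vmalloc address
 * range, so nothing ever counts as a vmalloc address and per-cpu
 * pointers in the first chunk fall through to __pa(). */
static inline int is_vmalloc_addr(const void *x)
{
#ifdef CONFIG_MMU
	unsigned long addr = (unsigned long)x;

	return addr >= VMALLOC_START && addr < VMALLOC_END;
#else
	return 0;
#endif
}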