Commit 04c6b3e2 authored by Max Filippov

xtensa: optimize local_flush_tlb_kernel_range

Don't flush whole TLB if only a small kernel range is requested.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Parent 8585b316
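
The message is terse; here is a sketch of what changes for a caller (illustration only, not part of the commit; addr is hypothetical):

	/* Unmap one page of kernel virtual address space, then: */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/*
	 * Before: the SMP build turned this into flush_tlb_all() and the UP
	 * build into local_flush_tlb_all(), wiping every TLB entry and
	 * forcing all hot mappings to be reloaded.
	 *
	 * After: the range is walked page by page and only the ITLB/DTLB
	 * entries covering it are invalidated; large or non-kernel ranges
	 * still fall back to a full flush.
	 */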
@@ -36,6 +36,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma,
 		unsigned long page);
 void local_flush_tlb_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifdef CONFIG_SMP
@@ -44,12 +45,7 @@ void flush_tlb_mm(struct mm_struct *);
 void flush_tlb_page(struct vm_area_struct *, unsigned long);
 void flush_tlb_range(struct vm_area_struct *, unsigned long,
 		unsigned long);
-static inline void flush_tlb_kernel_range(unsigned long start,
-		unsigned long end)
-{
-	flush_tlb_all();
-}
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #else /* !CONFIG_SMP */
@@ -58,7 +54,8 @@ static inline void flush_tlb_kernel_range(unsigned long start,
 #define flush_tlb_page(vma, page)	local_flush_tlb_page(vma, page)
 #define flush_tlb_range(vma, vmaddr, end)	local_flush_tlb_range(vma, vmaddr, \
 						end)
-#define flush_tlb_kernel_range(start, end)	local_flush_tlb_all()
+#define flush_tlb_kernel_range(start, end)	local_flush_tlb_kernel_range(start, \
+							end)
 
 #endif /* CONFIG_SMP */
...
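
After this hunk, flush_tlb_kernel_range() resolves to a real cross-CPU function under CONFIG_SMP (added in smp.c below) and expands directly to local_flush_tlb_kernel_range() on UP builds. A hedged caller sketch of the intended usage — example_kernel_unmap() is hypothetical, not part of this commit:

	#include <asm/tlbflush.h>

	/* Hypothetical helper: tear down kernel page-table entries, then
	 * flush the matching TLB entries on every CPU.  With this commit,
	 * small ranges stay cheap in both UP and SMP configurations. */
	static void example_kernel_unmap(unsigned long addr, unsigned long size)
	{
		/* ... clear the page-table entries for [addr, addr + size) ... */
		flush_tlb_kernel_range(addr, addr + size);
	}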
@@ -496,6 +496,21 @@ void flush_tlb_range(struct vm_area_struct *vma,
 	on_each_cpu(ipi_flush_tlb_range, &fd, 1);
 }
 
+static void ipi_flush_tlb_kernel_range(void *arg)
+{
+	struct flush_data *fd = arg;
+	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	struct flush_data fd = {
+		.addr1 = start,
+		.addr2 = end,
+	};
+	on_each_cpu(ipi_flush_tlb_kernel_range, &fd, 1);
+}
+
 /* Cache flush functions */
 
 static void ipi_flush_cache_all(void *arg)
...
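
The SMP path reuses this file's existing IPI plumbing: on_each_cpu() with wait == 1 runs the handler on every online CPU and returns only after all of them have finished, which is why fd can safely live on the caller's stack. The flush_data container is the one already used by the other TLB IPIs in this file; assuming the usual xtensa layout, it looks roughly like this (sketch; only addr1/addr2 are used by the kernel-range IPI):

	struct flush_data {
		struct vm_area_struct *vma;	/* unused for kernel ranges */
		unsigned long addr1;		/* start of the range */
		unsigned long addr2;		/* end of the range */
	};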
@@ -149,6 +149,21 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 	local_irq_restore(flags);
 }
 
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+	if (end > start && start >= TASK_SIZE && end <= PAGE_OFFSET &&
+	    end - start < _TLB_ENTRIES << PAGE_SHIFT) {
+		start &= PAGE_MASK;
+		while (start < end) {
+			invalidate_itlb_mapping(start);
+			invalidate_dtlb_mapping(start);
+			start += PAGE_SIZE;
+		}
+	} else {
+		local_flush_tlb_all();
+	}
+}
+
 #ifdef CONFIG_DEBUG_TLB_SANITY
 
 static unsigned get_pte_for_vaddr(unsigned vaddr)
...
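
local_flush_tlb_kernel_range() takes the per-page path only when the range lies between TASK_SIZE and PAGE_OFFSET — the window of kernel virtual addresses (vmalloc and friends) backed by per-page TLB entries, as opposed to the linear kernel mapping at PAGE_OFFSET and above — and is shorter than _TLB_ENTRIES pages, beyond which a full flush is cheaper anyway. The condition needs no extra parentheses: '-' binds tighter than '<<', which binds tighter than '<'. A runnable worked example of the bound, with _TLB_ENTRIES assumed to be 8 purely for illustration (the real value is configuration-dependent):

	#include <stdio.h>

	#define PAGE_SHIFT	12			/* 4 KiB pages */
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define _TLB_ENTRIES	8			/* assumed for illustration */

	int main(void)
	{
		unsigned long start = 0xc0001234;	/* hypothetical kernel vaddr */
		unsigned long end = start + 5 * PAGE_SIZE;

		if (end - start < _TLB_ENTRIES << PAGE_SHIFT) {
			unsigned long addr = start & ~(PAGE_SIZE - 1);
			unsigned long pages = 0;

			/* Mirrors the kernel loop: round start down, step one
			 * page at a time, one ITLB + one DTLB invalidation
			 * per iteration. */
			for (; addr < end; addr += PAGE_SIZE)
				pages++;
			printf("per-page path: %lu pages invalidated\n", pages);
		} else {
			printf("fallback: local_flush_tlb_all()\n");
		}
		return 0;
	}

Because start is rounded down while end is not, the unaligned five-page span above touches six pages and all six are invalidated; this mirrors the start &= PAGE_MASK in the kernel code.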