diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
index 93eb03e30fb45925c6ba183ebe88ac0a6b7d02ca..4666609fa65eec8d033080e6142232f7f1a94a3f 100644
--- a/drivers/staging/zsmalloc/zsmalloc-main.c
+++ b/drivers/staging/zsmalloc/zsmalloc-main.c
@@ -222,11 +222,9 @@ struct zs_pool {
 /*
  * By default, zsmalloc uses a copy-based object mapping method to access
  * allocations that span two pages. However, if a particular architecture
- * 1) Implements local_flush_tlb_kernel_range() and 2) Performs VM mapping
- * faster than copying, then it should be added here so that
- * USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use page table
- * mapping rather than copying
- * for object mapping.
+ * performs VM mapping faster than copying, then it should be added here
+ * so that USE_PGTABLE_MAPPING is defined. This causes zsmalloc to use
+ * page table mapping rather than copying for object mapping.
  */
 #if defined(CONFIG_ARM)
 #define USE_PGTABLE_MAPPING
@@ -663,7 +661,7 @@ static inline void __zs_unmap_object(struct mapping_area *area,
 
 	flush_cache_vunmap(addr, end);
 	unmap_kernel_range_noflush(addr, PAGE_SIZE * 2);
-	local_flush_tlb_kernel_range(addr, end);
+	flush_tlb_kernel_range(addr, end);
 }
 
 #else /* USE_PGTABLE_MAPPING */
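
The updated comment describes the opt-in, but the diff only shows the existing ARM case. Below is a minimal sketch, not part of the patch and not from the kernel tree, of how another architecture would opt in and what the two resulting code paths are; CONFIG_FOO is a placeholder Kconfig symbol used only for illustration.

/*
 * Hypothetical sketch: an architecture whose kernel VM mapping is faster
 * than copying adds itself to the #if, which defines USE_PGTABLE_MAPPING
 * and selects the page-table-mapping variants of the map/unmap helpers.
 */
#if defined(CONFIG_ARM) || defined(CONFIG_FOO)	/* CONFIG_FOO is a placeholder */
#define USE_PGTABLE_MAPPING
#endif

#ifdef USE_PGTABLE_MAPPING
/*
 * __zs_map_object()/__zs_unmap_object() map the object's two physical
 * pages into a reserved 2-page kernel VM area; on unmap, the mapping is
 * torn down and the TLB is flushed with the generic
 * flush_tlb_kernel_range(), as the patch above now does.
 */
#else
/*
 * Default path: the object is copied into (and back out of) a per-cpu
 * buffer instead of being mapped.
 */
#endif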