Commit 7c8e0181 · Author: Christoph Lameter · Committer: Linus Torvalds

mm: replace __get_cpu_var uses with this_cpu_ptr

Replace places where __get_cpu_var() is used for an address calculation
with this_cpu_ptr().
Signed-off-by: Christoph Lameter <cl@linux.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Parent: dc6f6c97
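For reference, the conversion pattern applied throughout this patch is sketched below. The per-CPU variable and function names (example_pcp, example_use) are hypothetical and not taken from the patch; the point is only that &__get_cpu_var(var), which takes the address of the current CPU's instance, is equivalent to this_cpu_ptr(&var). The returned pointer is stable only while preemption (or interrupts) are disabled, which is why the converted call sites sit inside preempt- or irq-disabled sections.

/*
 * Minimal sketch of the accessor change, assuming a hypothetical
 * per-CPU structure; this is not code from the patch itself.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>

struct example_pcp {
	int nr;
	void *slots[4];
};

static DEFINE_PER_CPU(struct example_pcp, example_pcp);

static void example_use(void)
{
	struct example_pcp *p;

	preempt_disable();
	/* Old form: p = &__get_cpu_var(example_pcp); */
	p = this_cpu_ptr(&example_pcp);	/* pointer to this CPU's instance */
	p->nr = 0;
	preempt_enable();
}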
@@ -194,7 +194,7 @@ radix_tree_node_alloc(struct radix_tree_root *root)
 		 * succeed in getting a node here (and never reach
 		 * kmem_cache_alloc)
 		 */
-		rtp = &__get_cpu_var(radix_tree_preloads);
+		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr) {
 			ret = rtp->nodes[rtp->nr - 1];
 			rtp->nodes[rtp->nr - 1] = NULL;
@@ -250,14 +250,14 @@ static int __radix_tree_preload(gfp_t gfp_mask)
 	int ret = -ENOMEM;
 
 	preempt_disable();
-	rtp = &__get_cpu_var(radix_tree_preloads);
+	rtp = this_cpu_ptr(&radix_tree_preloads);
 	while (rtp->nr < ARRAY_SIZE(rtp->nodes)) {
 		preempt_enable();
 		node = kmem_cache_alloc(radix_tree_node_cachep, gfp_mask);
 		if (node == NULL)
 			goto out;
 		preempt_disable();
-		rtp = &__get_cpu_var(radix_tree_preloads);
+		rtp = this_cpu_ptr(&radix_tree_preloads);
 		if (rtp->nr < ARRAY_SIZE(rtp->nodes))
 			rtp->nodes[rtp->nr++] = node;
 		else
@@ -2436,7 +2436,7 @@ static void drain_stock(struct memcg_stock_pcp *stock)
  */
 static void drain_local_stock(struct work_struct *dummy)
 {
-	struct memcg_stock_pcp *stock = &__get_cpu_var(memcg_stock);
+	struct memcg_stock_pcp *stock = this_cpu_ptr(&memcg_stock);
 	drain_stock(stock);
 	clear_bit(FLUSHING_CACHED_CHARGE, &stock->flags);
 }
@@ -1298,7 +1298,7 @@ static void memory_failure_work_func(struct work_struct *work)
 	unsigned long proc_flags;
 	int gotten;
 
-	mf_cpu = &__get_cpu_var(memory_failure_cpu);
+	mf_cpu = this_cpu_ptr(&memory_failure_cpu);
 	for (;;) {
 		spin_lock_irqsave(&mf_cpu->lock, proc_flags);
 		gotten = kfifo_get(&mf_cpu->fifo, &entry);
@@ -1623,7 +1623,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * 1000+ tasks, all of them start dirtying pages at exactly the same
 	 * time, hence all honoured too large initial task->nr_dirtied_pause.
 	 */
-	p = &__get_cpu_var(bdp_ratelimits);
+	p = this_cpu_ptr(&bdp_ratelimits);
 	if (unlikely(current->nr_dirtied >= ratelimit))
 		*p = 0;
 	else if (unlikely(*p >= ratelimit_pages)) {
@@ -1635,7 +1635,7 @@ void balance_dirty_pages_ratelimited(struct address_space *mapping)
 	 * short-lived tasks (eg. gcc invocations in a kernel build) escaping
 	 * the dirty throttling and livelock other long-run dirtiers.
 	 */
-	p = &__get_cpu_var(dirty_throttle_leaks);
+	p = this_cpu_ptr(&dirty_throttle_leaks);
 	if (*p > 0 && current->nr_dirtied < ratelimit) {
 		unsigned long nr_pages_dirtied;
 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
@@ -2209,7 +2209,7 @@ static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
 
 	page = new_slab(s, flags, node);
 	if (page) {
-		c = __this_cpu_ptr(s->cpu_slab);
+		c = raw_cpu_ptr(s->cpu_slab);
 		if (c->page)
 			flush_slab(s, c);
 
@@ -2425,7 +2425,7 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
 	 * and the retrieval of the tid.
 	 */
 	preempt_disable();
-	c = __this_cpu_ptr(s->cpu_slab);
+	c = this_cpu_ptr(s->cpu_slab);
 
 	/*
 	 * The transaction ids are globally unique per cpu and per operation on
@@ -2681,7 +2681,7 @@ static __always_inline void slab_free(struct kmem_cache *s,
 	 * during the cmpxchg then the free will succedd.
 	 */
 	preempt_disable();
-	c = __this_cpu_ptr(s->cpu_slab);
+	c = this_cpu_ptr(s->cpu_slab);
 
 	tid = c->tid;
 	preempt_enable();
@@ -441,7 +441,7 @@ void rotate_reclaimable_page(struct page *page)
 
 		page_cache_get(page);
 		local_irq_save(flags);
-		pvec = &__get_cpu_var(lru_rotate_pvecs);
+		pvec = this_cpu_ptr(&lru_rotate_pvecs);
 		if (!pagevec_add(pvec, page))
 			pagevec_move_tail(pvec);
 		local_irq_restore(flags);
@@ -1496,7 +1496,7 @@ void vfree(const void *addr)
 	if (!addr)
 		return;
 	if (unlikely(in_interrupt())) {
-		struct vfree_deferred *p = &__get_cpu_var(vfree_deferred);
+		struct vfree_deferred *p = this_cpu_ptr(&vfree_deferred);
 		if (llist_add((struct llist_node *)addr, &p->list))
 			schedule_work(&p->wq);
 	} else
@@ -489,7 +489,7 @@ static void refresh_cpu_vm_stats(void)
 			continue;
 
 		if (__this_cpu_read(p->pcp.count))
-			drain_zone_pages(zone, __this_cpu_ptr(&p->pcp));
+			drain_zone_pages(zone, this_cpu_ptr(&p->pcp));
 #endif
 	}
 	fold_diff(global_diff);
@@ -1230,7 +1230,7 @@ int sysctl_stat_interval __read_mostly = HZ;
 static void vmstat_update(struct work_struct *w)
 {
 	refresh_cpu_vm_stats();
-	schedule_delayed_work(&__get_cpu_var(vmstat_work),
+	schedule_delayed_work(this_cpu_ptr(&vmstat_work),
 		round_jiffies_relative(sysctl_stat_interval));
 }
 
@@ -1082,7 +1082,7 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 	class = &pool->size_class[class_idx];
 	off = obj_idx_to_offset(page, obj_idx, class->size);
 
-	area = &__get_cpu_var(zs_map_area);
+	area = this_cpu_ptr(&zs_map_area);
 	if (off + class->size <= PAGE_SIZE)
 		kunmap_atomic(area->vm_addr);
 	else {