Commit 903ceff7 authored by Christoph Lameter, committed by Tejun Heo

net: Replace get_cpu_var through this_cpu_ptr

Replace uses of get_cpu_var for address calculation with this_cpu_ptr.

Cc: netdev@vger.kernel.org
Cc: Eric Dumazet <edumazet@google.com>
Acked-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Parent f7f66b05
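For reference, the pattern applied throughout: __get_cpu_var(var) (like __raw_get_cpu_var) evaluates the per-CPU variable as an lvalue, so callers took its address with a leading &, whereas this_cpu_ptr(&var) computes that per-CPU address directly; __this_cpu_ptr() likewise becomes raw_cpu_ptr(), the variant without the preemption sanity check. A minimal before/after sketch, using a hypothetical per-CPU variable example_count that is not part of this patch:

	/* Hypothetical per-CPU variable, for illustration only. */
	static DEFINE_PER_CPU(unsigned long, example_count);

	static void example(void)
	{
		unsigned long *p;
		unsigned long flags;

		local_irq_save(flags);
		/* Old form: evaluate the per-CPU lvalue, then take its address. */
		p = &__get_cpu_var(example_count);
		/* New form: compute the same address directly; safe here because
		 * interrupts (and therefore preemption) are disabled. */
		p = this_cpu_ptr(&example_count);
		(*p)++;
		local_irq_restore(flags);
	}

Both forms yield the pointer for the current CPU; the call sites touched by this patch already run with interrupts, bottom halves, or preemption disabled, so the address cannot go stale between calculation and use.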
@@ -242,7 +242,7 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
 DECLARE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
 static inline struct nf_conn *nf_ct_untracked_get(void)
 {
-	return &__raw_get_cpu_var(nf_conntrack_untracked);
+	return raw_cpu_ptr(&nf_conntrack_untracked);
 }
 void nf_ct_untracked_status_or(unsigned long bits);
...
@@ -168,7 +168,7 @@ struct linux_xfrm_mib {
 #define SNMP_ADD_STATS64_BH(mib, field, addend)			\
 	do {							\
-		__typeof__(*mib) *ptr = __this_cpu_ptr(mib);	\
+		__typeof__(*mib) *ptr = raw_cpu_ptr(mib);	\
 		u64_stats_update_begin(&ptr->syncp);		\
 		ptr->mibs[field] += addend;			\
 		u64_stats_update_end(&ptr->syncp);		\
@@ -189,8 +189,8 @@ struct linux_xfrm_mib {
 #define SNMP_INC_STATS64(mib, field) SNMP_ADD_STATS64(mib, field, 1)
 #define SNMP_UPD_PO_STATS64_BH(mib, basefield, addend)		\
 	do {							\
 		__typeof__(*mib) *ptr;				\
-		ptr = __this_cpu_ptr(mib);			\
+		ptr = raw_cpu_ptr((mib));			\
 		u64_stats_update_begin(&ptr->syncp);		\
 		ptr->mibs[basefield##PKTS]++;			\
 		ptr->mibs[basefield##OCTETS] += addend;		\
...
@@ -2153,7 +2153,7 @@ static inline void __netif_reschedule(struct Qdisc *q)
 	unsigned long flags;
 	local_irq_save(flags);
-	sd = &__get_cpu_var(softnet_data);
+	sd = this_cpu_ptr(&softnet_data);
 	q->next_sched = NULL;
 	*sd->output_queue_tailp = q;
 	sd->output_queue_tailp = &q->next_sched;
@@ -3195,7 +3195,7 @@ static void rps_trigger_softirq(void *data)
 static int rps_ipi_queued(struct softnet_data *sd)
 {
 #ifdef CONFIG_RPS
-	struct softnet_data *mysd = &__get_cpu_var(softnet_data);
+	struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
 	if (sd != mysd) {
 		sd->rps_ipi_next = mysd->rps_ipi_list;
@@ -3222,7 +3222,7 @@ static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
 	if (qlen < (netdev_max_backlog >> 1))
 		return false;
-	sd = &__get_cpu_var(softnet_data);
+	sd = this_cpu_ptr(&softnet_data);
 	rcu_read_lock();
 	fl = rcu_dereference(sd->flow_limit);
@@ -3369,7 +3369,7 @@ EXPORT_SYMBOL(netif_rx_ni);
 static void net_tx_action(struct softirq_action *h)
 {
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	if (sd->completion_queue) {
 		struct sk_buff *clist;
@@ -3794,7 +3794,7 @@ EXPORT_SYMBOL(netif_receive_skb);
 static void flush_backlog(void *arg)
 {
 	struct net_device *dev = arg;
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	struct sk_buff *skb, *tmp;
 	rps_lock(sd);
@@ -4301,7 +4301,7 @@ void __napi_schedule(struct napi_struct *n)
 	unsigned long flags;
 	local_irq_save(flags);
-	____napi_schedule(&__get_cpu_var(softnet_data), n);
+	____napi_schedule(this_cpu_ptr(&softnet_data), n);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(__napi_schedule);
@@ -4422,7 +4422,7 @@ EXPORT_SYMBOL(netif_napi_del);
 static void net_rx_action(struct softirq_action *h)
 {
-	struct softnet_data *sd = &__get_cpu_var(softnet_data);
+	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
 	unsigned long time_limit = jiffies + 2;
 	int budget = netdev_budget;
 	void *have;
...
@@ -146,7 +146,7 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	unsigned long flags;
 	local_irq_save(flags);
-	data = &__get_cpu_var(dm_cpu_data);
+	data = this_cpu_ptr(&dm_cpu_data);
 	spin_lock(&data->lock);
 	dskb = data->skb;
...
@@ -345,7 +345,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
 	unsigned long flags;
 	local_irq_save(flags);
-	nc = &__get_cpu_var(netdev_alloc_cache);
+	nc = this_cpu_ptr(&netdev_alloc_cache);
 	if (unlikely(!nc->frag.page)) {
 refill:
 		for (order = NETDEV_FRAG_PAGE_MAX_ORDER; ;) {
...
@@ -1311,7 +1311,7 @@ static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
 	if (rt_is_input_route(rt)) {
 		p = (struct rtable **)&nh->nh_rth_input;
 	} else {
-		p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
+		p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
 	}
 	orig = *p;
@@ -1939,7 +1939,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
 			do_cache = false;
 			goto add;
 		}
-		prth = __this_cpu_ptr(nh->nh_pcpu_rth_output);
+		prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
 	}
 	rth = rcu_dereference(*prth);
 	if (rt_cache_valid(rth)) {
...
@@ -40,7 +40,7 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
 	net_get_random_once(syncookie_secret, sizeof(syncookie_secret));
-	tmp = __get_cpu_var(ipv4_cookie_scratch);
+	tmp = this_cpu_ptr(ipv4_cookie_scratch);
 	memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c]));
 	tmp[0] = (__force u32)saddr;
 	tmp[1] = (__force u32)daddr;
...
@@ -3058,7 +3058,7 @@ struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
 	local_bh_disable();
 	p = ACCESS_ONCE(tcp_md5sig_pool);
 	if (p)
-		return __this_cpu_ptr(p);
+		return raw_cpu_ptr(p);
 	local_bh_enable();
 	return NULL;
...
@@ -842,7 +842,7 @@ void tcp_wfree(struct sk_buff *skb)
 		/* queue this socket to tasklet queue */
 		local_irq_save(flags);
-		tsq = &__get_cpu_var(tsq_tasklet);
+		tsq = this_cpu_ptr(&tsq_tasklet);
 		list_add(&tp->tsq_node, &tsq->head);
 		tasklet_schedule(&tsq->tasklet);
 		local_irq_restore(flags);
...
@@ -67,7 +67,7 @@ static u32 cookie_hash(const struct in6_addr *saddr, const struct in6_addr *daddr,
 	net_get_random_once(syncookie6_secret, sizeof(syncookie6_secret));
-	tmp = __get_cpu_var(ipv6_cookie_scratch);
+	tmp = this_cpu_ptr(ipv6_cookie_scratch);
 	/*
 	 * we have 320 bits of information to hash, copy in the remaining
...
@@ -267,7 +267,7 @@ static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
 	unsigned long *flag;
 	preempt_disable();
-	flag = &__get_cpu_var(clean_list_grace);
+	flag = this_cpu_ptr(&clean_list_grace);
 	set_bit(CLEAN_LIST_BUSY_BIT, flag);
 	ret = llist_del_first(&pool->clean_list);
 	if (ret)
...