diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b72d5a4609031445f37de8ffbebfb7eec33c7311..072652d94d9f5afe908f982c1acce9339bfbfc32 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -585,15 +585,15 @@ static inline void rps_reset_sock_flow(struct rps_sock_flow_table *table,
 	table->ents[hash & table->mask] = RPS_NO_CPU;
 }
 
-extern struct rps_sock_flow_table *rps_sock_flow_table;
+extern struct rps_sock_flow_table __rcu *rps_sock_flow_table;
 
 /* This structure contains an instance of an RX queue. */
 struct netdev_rx_queue {
-	struct rps_map *rps_map;
-	struct rps_dev_flow_table *rps_flow_table;
-	struct kobject kobj;
-	struct netdev_rx_queue *first;
-	atomic_t count;
+	struct rps_map __rcu		*rps_map;
+	struct rps_dev_flow_table __rcu	*rps_flow_table;
+	struct kobject			kobj;
+	struct netdev_rx_queue		*first;
+	atomic_t			count;
 } ____cacheline_aligned_in_smp;
 #endif /* CONFIG_RPS */
 
diff --git a/net/core/dev.c b/net/core/dev.c
index 365f7f5077e4bc02c9c6e05e9a7c80274d37f760..e8a8dc19365b18a3b77ac5eacdaa1ba9568f8aaf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2413,7 +2413,7 @@ EXPORT_SYMBOL(__skb_get_rxhash);
 #ifdef CONFIG_RPS
 
 /* One global table that all flow-based protocols share. */
-struct rps_sock_flow_table *rps_sock_flow_table __read_mostly;
+struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
 EXPORT_SYMBOL(rps_sock_flow_table);
 
 /*
@@ -2425,7 +2425,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 		       struct rps_dev_flow **rflowp)
 {
 	struct netdev_rx_queue *rxqueue;
-	struct rps_map *map = NULL;
+	struct rps_map *map;
 	struct rps_dev_flow_table *flow_table;
 	struct rps_sock_flow_table *sock_flow_table;
 	int cpu = -1;
@@ -2444,15 +2444,15 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
 	} else
 		rxqueue = dev->_rx;
 
-	if (rxqueue->rps_map) {
-		map = rcu_dereference(rxqueue->rps_map);
-		if (map && map->len == 1) {
+	map = rcu_dereference(rxqueue->rps_map);
+	if (map) {
+		if (map->len == 1) {
 			tcpu = map->cpus[0];
 			if (cpu_online(tcpu))
 				cpu = tcpu;
 			goto done;
 		}
-	} else if (!rxqueue->rps_flow_table) {
+	} else if (!rcu_dereference_raw(rxqueue->rps_flow_table)) {
 		goto done;
 	}
 
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index b143173e3eb2bd9259e1c0122800848146b4c136..a5ff5a89f376bb1299dec78fa34abe9480ae01f1 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -598,7 +598,8 @@ static ssize_t store_rps_map(struct netdev_rx_queue *queue,
 	}
 
 	spin_lock(&rps_map_lock);
-	old_map = queue->rps_map;
+	old_map = rcu_dereference_protected(queue->rps_map,
+					    lockdep_is_held(&rps_map_lock));
 	rcu_assign_pointer(queue->rps_map, map);
 	spin_unlock(&rps_map_lock);
 
@@ -677,7 +678,8 @@ static ssize_t store_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,
 		table = NULL;
 
 	spin_lock(&rps_dev_flow_lock);
-	old_table = queue->rps_flow_table;
+	old_table = rcu_dereference_protected(queue->rps_flow_table,
+					      lockdep_is_held(&rps_dev_flow_lock));
 	rcu_assign_pointer(queue->rps_flow_table, table);
 	spin_unlock(&rps_dev_flow_lock);
 
@@ -705,13 +707,17 @@ static void rx_queue_release(struct kobject *kobj)
 {
 	struct netdev_rx_queue *queue = to_rx_queue(kobj);
 	struct netdev_rx_queue *first = queue->first;
+	struct rps_map *map;
+	struct rps_dev_flow_table *flow_table;
 
-	if (queue->rps_map)
-		call_rcu(&queue->rps_map->rcu, rps_map_release);
-	if (queue->rps_flow_table)
-		call_rcu(&queue->rps_flow_table->rcu,
-			 rps_dev_flow_table_release);
+	map = rcu_dereference_raw(queue->rps_map);
+	if (map)
+		call_rcu(&map->rcu, rps_map_release);
+
+	flow_table = rcu_dereference_raw(queue->rps_flow_table);
+	if (flow_table)
+		call_rcu(&flow_table->rcu, rps_dev_flow_table_release);
 
 	if (atomic_dec_and_test(&first->count))
 		kfree(first);
 }
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 01eee5d984be4b6d838357a56e0211508d0e31ba..385b6095fdc4b1da8540c879d3a5c450954cb7f7 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -34,7 +34,8 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
 
 	mutex_lock(&sock_flow_mutex);
 
-	orig_sock_table = rps_sock_flow_table;
+	orig_sock_table = rcu_dereference_protected(rps_sock_flow_table,
+					lockdep_is_held(&sock_flow_mutex));
 	size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0;
 
 	ret = proc_dointvec(&tmp, write, buffer, lenp, ppos);
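
For reference, the access pattern the patch enforces is the one sparse checks for __rcu pointers: readers go through rcu_dereference() inside an RCU read-side section, updaters go through rcu_dereference_protected() naming the guarding lock via lockdep_is_held(), new values are published with rcu_assign_pointer(), and the old object is freed after a grace period with call_rcu(). The sketch below is not part of the patch; demo_map, demo_lock, demo_map_release, demo_read_len and demo_replace are made-up names used only to show the pattern in isolation.

	#include <linux/kernel.h>
	#include <linux/errno.h>
	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>
	#include <linux/slab.h>

	/* Hypothetical RCU-protected object, analogous to struct rps_map. */
	struct demo_map {
		struct rcu_head rcu;
		unsigned int len;
	};

	static struct demo_map __rcu *demo_map;	/* sparse now checks every access */
	static DEFINE_SPINLOCK(demo_lock);	/* serializes updates of demo_map */

	/* RCU callback: runs after all pre-existing readers have finished. */
	static void demo_map_release(struct rcu_head *rcu)
	{
		struct demo_map *map = container_of(rcu, struct demo_map, rcu);

		kfree(map);
	}

	/* Reader side: lockless, protected only by rcu_read_lock(). */
	static unsigned int demo_read_len(void)
	{
		struct demo_map *map;
		unsigned int len = 0;

		rcu_read_lock();
		map = rcu_dereference(demo_map);	/* legal: inside read section */
		if (map)
			len = map->len;
		rcu_read_unlock();
		return len;
	}

	/* Update side: swap in a new map under demo_lock, defer freeing the old one. */
	static int demo_replace(unsigned int len)
	{
		struct demo_map *new, *old;

		new = kmalloc(sizeof(*new), GFP_KERNEL);
		if (!new)
			return -ENOMEM;
		new->len = len;

		spin_lock(&demo_lock);
		/* legal without rcu_read_lock(): lockdep knows demo_lock is held */
		old = rcu_dereference_protected(demo_map,
						lockdep_is_held(&demo_lock));
		rcu_assign_pointer(demo_map, new);	/* publish with write barrier */
		spin_unlock(&demo_lock);

		if (old)
			call_rcu(&old->rcu, demo_map_release);
		return 0;
	}

rcu_dereference_raw() is the remaining escape hatch: it silences the sparse check where neither a read-side section nor the updater lock applies, which is why the patch uses it in the teardown path rx_queue_release() and for the cheap NULL test on rps_flow_table in get_rps_cpu().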