Commit 45c91490 authored by David S. Miller

Merge branch 'team' ("add support for peer notifications and igmp rejoins for team")

Jiri Pirko says:

====================
The middle patch adjusts core infrastructure so the bonding code can be
generalized and reused by team.

v1->v2: using msecs_to_jiffies() as suggested by Eric

Jiri Pirko (3):
  team: add peer notification
  net: convert resend IGMP to notifier event
  team: add support for sending multicast rejoins
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
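As an illustration of the notifier conversion described above, here is a minimal out-of-tree sketch (module and symbol names are hypothetical, not part of this patchset) of a listener for the new NETDEV_RESEND_IGMP event; it follows the same notifier pattern the igmp, bridge and vlan hooks use in the diff below:

/*
 * Sketch only: a tiny module that logs NETDEV_RESEND_IGMP events.
 * Masters such as team or bonding emit the event under RTNL; in-tree
 * listeners (e.g. net/ipv4/igmp.c) then rejoin multicast groups on
 * the affected device.
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int resend_igmp_watch(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_RESEND_IGMP)
		netdev_info(dev, "NETDEV_RESEND_IGMP requested\n");

	return NOTIFY_DONE;
}

static struct notifier_block resend_igmp_nb = {
	.notifier_call = resend_igmp_watch,
};

static int __init resend_igmp_watch_init(void)
{
	return register_netdevice_notifier(&resend_igmp_nb);
}

static void __exit resend_igmp_watch_exit(void)
{
	unregister_netdevice_notifier(&resend_igmp_nb);
}

module_init(resend_igmp_watch_init);
module_exit(resend_igmp_watch_exit);
MODULE_LICENSE("GPL");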
@@ -715,15 +715,6 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
return err;
}
static void __bond_resend_igmp_join_requests(struct net_device *dev)
{
struct in_device *in_dev;
in_dev = __in_dev_get_rcu(dev);
if (in_dev)
ip_mc_rejoin_groups(in_dev);
}
/*
* Retrieve the list of registered multicast addresses for the bonding
* device and retransmit an IGMP JOIN request to the current active
@@ -731,33 +722,12 @@ static void __bond_resend_igmp_join_requests(struct net_device *dev)
*/
static void bond_resend_igmp_join_requests(struct bonding *bond)
{
struct net_device *bond_dev, *vlan_dev, *upper_dev;
struct vlan_entry *vlan;
read_lock(&bond->lock);
rcu_read_lock();
bond_dev = bond->dev;
/* rejoin all groups on bond device */
__bond_resend_igmp_join_requests(bond_dev);
/*
* if bond is enslaved to a bridge,
* then rejoin all groups on its master
*/
upper_dev = netdev_master_upper_dev_get_rcu(bond_dev);
if (upper_dev && upper_dev->priv_flags & IFF_EBRIDGE)
__bond_resend_igmp_join_requests(upper_dev);
/* rejoin all groups on vlan devices */
list_for_each_entry(vlan, &bond->vlan_list, vlan_list) {
vlan_dev = __vlan_find_dev_deep(bond_dev, htons(ETH_P_8021Q),
vlan->vlan_id);
if (vlan_dev)
__bond_resend_igmp_join_requests(vlan_dev);
if (!rtnl_trylock()) {
queue_delayed_work(bond->wq, &bond->mcast_work, 0);
return;
}
rcu_read_unlock();
call_netdevice_notifiers(NETDEV_RESEND_IGMP, bond->dev);
rtnl_unlock();
/* We use curr_slave_lock to protect against concurrent access to
* igmp_retrans from multiple running instances of this function and
@@ -3234,6 +3204,10 @@ static int bond_slave_netdev_event(unsigned long event,
case NETDEV_FEAT_CHANGE:
bond_compute_features(bond);
break;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, slave->bond->dev);
break;
default:
break;
}
@@ -622,6 +622,86 @@ static int team_change_mode(struct team *team, const char *kind)
}
/*********************
* Peers notification
*********************/
static void team_notify_peers_work(struct work_struct *work)
{
struct team *team;
team = container_of(work, struct team, notify_peers.dw.work);
if (!rtnl_trylock()) {
schedule_delayed_work(&team->notify_peers.dw, 0);
return;
}
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
rtnl_unlock();
if (!atomic_dec_and_test(&team->notify_peers.count_pending))
schedule_delayed_work(&team->notify_peers.dw,
msecs_to_jiffies(team->notify_peers.interval));
}
static void team_notify_peers(struct team *team)
{
if (!team->notify_peers.count || !netif_running(team->dev))
return;
atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
schedule_delayed_work(&team->notify_peers.dw, 0);
}
static void team_notify_peers_init(struct team *team)
{
INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
}
static void team_notify_peers_fini(struct team *team)
{
cancel_delayed_work_sync(&team->notify_peers.dw);
}
/*******************************
* Send multicast group rejoins
*******************************/
static void team_mcast_rejoin_work(struct work_struct *work)
{
struct team *team;
team = container_of(work, struct team, mcast_rejoin.dw.work);
if (!rtnl_trylock()) {
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
return;
}
call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
rtnl_unlock();
if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
schedule_delayed_work(&team->mcast_rejoin.dw,
msecs_to_jiffies(team->mcast_rejoin.interval));
}
static void team_mcast_rejoin(struct team *team)
{
if (!team->mcast_rejoin.count || !netif_running(team->dev))
return;
atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
}
static void team_mcast_rejoin_init(struct team *team)
{
INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
}
static void team_mcast_rejoin_fini(struct team *team)
{
cancel_delayed_work_sync(&team->mcast_rejoin.dw);
}
/************************
* Rx path frame handler
************************/
@@ -846,6 +926,8 @@ static void team_port_enable(struct team *team,
team_queue_override_port_add(team, port);
if (team->ops.port_enabled)
team->ops.port_enabled(team, port);
team_notify_peers(team);
team_mcast_rejoin(team);
}
static void __reconstruct_port_hlist(struct team *team, int rm_index)
@@ -875,6 +957,8 @@ static void team_port_disable(struct team *team,
team->en_port_count--;
team_queue_override_port_del(team, port);
team_adjust_ops(team);
team_notify_peers(team);
team_mcast_rejoin(team);
}
#define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
@@ -1205,6 +1289,62 @@ static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
return team_change_mode(team, ctx->data.str_val);
}
static int team_notify_peers_count_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->notify_peers.count;
return 0;
}
static int team_notify_peers_count_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
team->notify_peers.count = ctx->data.u32_val;
return 0;
}
static int team_notify_peers_interval_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->notify_peers.interval;
return 0;
}
static int team_notify_peers_interval_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
team->notify_peers.interval = ctx->data.u32_val;
return 0;
}
static int team_mcast_rejoin_count_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->mcast_rejoin.count;
return 0;
}
static int team_mcast_rejoin_count_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
team->mcast_rejoin.count = ctx->data.u32_val;
return 0;
}
static int team_mcast_rejoin_interval_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
ctx->data.u32_val = team->mcast_rejoin.interval;
return 0;
}
static int team_mcast_rejoin_interval_set(struct team *team,
struct team_gsetter_ctx *ctx)
{
team->mcast_rejoin.interval = ctx->data.u32_val;
return 0;
}
static int team_port_en_option_get(struct team *team,
struct team_gsetter_ctx *ctx)
{
@@ -1316,6 +1456,30 @@ static const struct team_option team_options[] = {
.getter = team_mode_option_get,
.setter = team_mode_option_set,
},
{
.name = "notify_peers_count",
.type = TEAM_OPTION_TYPE_U32,
.getter = team_notify_peers_count_get,
.setter = team_notify_peers_count_set,
},
{
.name = "notify_peers_interval",
.type = TEAM_OPTION_TYPE_U32,
.getter = team_notify_peers_interval_get,
.setter = team_notify_peers_interval_set,
},
{
.name = "mcast_rejoin_count",
.type = TEAM_OPTION_TYPE_U32,
.getter = team_mcast_rejoin_count_get,
.setter = team_mcast_rejoin_count_set,
},
{
.name = "mcast_rejoin_interval",
.type = TEAM_OPTION_TYPE_U32,
.getter = team_mcast_rejoin_interval_get,
.setter = team_mcast_rejoin_interval_set,
},
{
.name = "enabled",
.type = TEAM_OPTION_TYPE_BOOL,
@@ -1396,6 +1560,10 @@ static int team_init(struct net_device *dev)
INIT_LIST_HEAD(&team->option_list);
INIT_LIST_HEAD(&team->option_inst_list);
team_notify_peers_init(team);
team_mcast_rejoin_init(team);
err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
if (err)
goto err_options_register;
@@ -1406,6 +1574,8 @@ static int team_init(struct net_device *dev)
return 0;
err_options_register:
team_mcast_rejoin_fini(team);
team_notify_peers_fini(team);
team_queue_override_fini(team);
err_team_queue_override_init:
free_percpu(team->pcpu_stats);
@@ -1425,6 +1595,8 @@ static void team_uninit(struct net_device *dev)
__team_change_mode(team, NULL); /* cleanup */
__team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
team_mcast_rejoin_fini(team);
team_notify_peers_fini(team);
team_queue_override_fini(team);
mutex_unlock(&team->lock);
}
@@ -2698,6 +2870,10 @@ static int team_device_event(struct notifier_block *unused,
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid to change type of underlaying device */
return NOTIFY_BAD;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, port->team->dev);
break;
}
return NOTIFY_DONE;
}
@@ -10,9 +10,9 @@
#ifndef _LINUX_IF_TEAM_H_
#define _LINUX_IF_TEAM_H_
#include <linux/netpoll.h>
#include <net/sch_generic.h>
#include <linux/types.h>
#include <uapi/linux/if_team.h>
struct team_pcpu_stats {
@@ -194,6 +194,18 @@ struct team {
bool user_carrier_enabled;
bool queue_override_enabled;
struct list_head *qom_lists; /* array of queue override mapping lists */
struct {
unsigned int count;
unsigned int interval; /* in ms */
atomic_t count_pending;
struct delayed_work dw;
} notify_peers;
struct {
unsigned int count;
unsigned int interval; /* in ms */
atomic_t count_pending;
struct delayed_work dw;
} mcast_rejoin;
long mode_priv[TEAM_MODE_PRIV_LONGS];
};
@@ -129,6 +129,5 @@ extern void ip_mc_unmap(struct in_device *);
extern void ip_mc_remap(struct in_device *);
extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr);
extern void ip_mc_rejoin_groups(struct in_device *in_dev);
#endif
@@ -1633,6 +1633,7 @@ struct packet_offload {
#define NETDEV_NOTIFY_PEERS 0x0013
#define NETDEV_JOIN 0x0014
#define NETDEV_CHANGEUPPER 0x0015
#define NETDEV_RESEND_IGMP 0x0016
extern int register_netdevice_notifier(struct notifier_block *nb);
extern int unregister_netdevice_notifier(struct notifier_block *nb);
@@ -459,6 +459,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
case NETDEV_NOTIFY_PEERS:
case NETDEV_BONDING_FAILOVER:
case NETDEV_RESEND_IGMP:
/* Propagate to vlan devices */
vlan_group_for_each_dev(grp, i, vlandev)
call_netdevice_notifiers(event, vlandev);
@@ -102,6 +102,11 @@ static int br_device_event(struct notifier_block *unused, unsigned long event, v
case NETDEV_PRE_TYPE_CHANGE:
/* Forbid underlaying device to change its type. */
return NOTIFY_BAD;
case NETDEV_RESEND_IGMP:
/* Propagate to master device */
call_netdevice_notifiers(event, br->dev);
break;
}
/* Events that may cause spanning tree to refresh */
@@ -1323,16 +1323,17 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
EXPORT_SYMBOL(ip_mc_inc_group);
/*
* Resend IGMP JOIN report; used for bonding.
* Called with rcu_read_lock()
* Resend IGMP JOIN report; used by netdev notifier.
*/
void ip_mc_rejoin_groups(struct in_device *in_dev)
static void ip_mc_rejoin_groups(struct in_device *in_dev)
{
#ifdef CONFIG_IP_MULTICAST
struct ip_mc_list *im;
int type;
for_each_pmc_rcu(in_dev, im) {
ASSERT_RTNL();
for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == IGMP_ALL_HOSTS)
continue;
@@ -1349,7 +1350,6 @@ void ip_mc_rejoin_groups(struct in_device *in_dev)
}
#endif
}
EXPORT_SYMBOL(ip_mc_rejoin_groups);
/*
* A socket has left a multicast group on device dev
@@ -2735,8 +2735,42 @@ static struct pernet_operations igmp_net_ops = {
.exit = igmp_net_exit,
};
static int igmp_netdev_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct in_device *in_dev;
switch (event) {
case NETDEV_RESEND_IGMP:
in_dev = __in_dev_get_rtnl(dev);
if (in_dev)
ip_mc_rejoin_groups(in_dev);
break;
default:
break;
}
return NOTIFY_DONE;
}
static struct notifier_block igmp_notifier = {
.notifier_call = igmp_netdev_event,
};
int __init igmp_mc_proc_init(void)
{
return register_pernet_subsys(&igmp_net_ops);
int err;
err = register_pernet_subsys(&igmp_net_ops);
if (err)
return err;
err = register_netdevice_notifier(&igmp_notifier);
if (err)
goto reg_notif_fail;
return 0;
reg_notif_fail:
unregister_pernet_subsys(&igmp_net_ops);
return err;
}
#endif