提交 e951f145 编写于 作者: D David S. Miller

Merge branch 'tipc-next'

Jon Maloy says:

====================
tipc: bearer and link improvements

The first commit makes it possible to set and check the 'blocked' state
of a bearer from the generic bearer layer. The second commit is a small
improvement to the link congestion mechanism.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
...@@ -56,6 +56,13 @@ static struct tipc_media * const media_info_array[] = { ...@@ -56,6 +56,13 @@ static struct tipc_media * const media_info_array[] = {
NULL NULL
}; };
static struct tipc_bearer *bearer_get(struct net *net, int bearer_id)
{
struct tipc_net *tn = tipc_net(net);
return rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
}
static void bearer_disable(struct net *net, struct tipc_bearer *b); static void bearer_disable(struct net *net, struct tipc_bearer *b);
/** /**
...@@ -323,6 +330,7 @@ static int tipc_enable_bearer(struct net *net, const char *name, ...@@ -323,6 +330,7 @@ static int tipc_enable_bearer(struct net *net, const char *name,
b->domain = disc_domain; b->domain = disc_domain;
b->net_plane = bearer_id + 'A'; b->net_plane = bearer_id + 'A';
b->priority = priority; b->priority = priority;
test_and_set_bit_lock(0, &b->up);
res = tipc_disc_create(net, b, &b->bcast_addr, &skb); res = tipc_disc_create(net, b, &b->bcast_addr, &skb);
if (res) { if (res) {
...@@ -360,15 +368,24 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b) ...@@ -360,15 +368,24 @@ static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b)
*/ */
void tipc_bearer_reset_all(struct net *net) void tipc_bearer_reset_all(struct net *net)
{ {
struct tipc_net *tn = tipc_net(net);
struct tipc_bearer *b; struct tipc_bearer *b;
int i; int i;
for (i = 0; i < MAX_BEARERS; i++) { for (i = 0; i < MAX_BEARERS; i++) {
b = rcu_dereference_rtnl(tn->bearer_list[i]); b = bearer_get(net, i);
if (b)
clear_bit_unlock(0, &b->up);
}
for (i = 0; i < MAX_BEARERS; i++) {
b = bearer_get(net, i);
if (b) if (b)
tipc_reset_bearer(net, b); tipc_reset_bearer(net, b);
} }
for (i = 0; i < MAX_BEARERS; i++) {
b = bearer_get(net, i);
if (b)
test_and_set_bit_lock(0, &b->up);
}
} }
/** /**
...@@ -382,8 +399,9 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b) ...@@ -382,8 +399,9 @@ static void bearer_disable(struct net *net, struct tipc_bearer *b)
int bearer_id = b->identity; int bearer_id = b->identity;
pr_info("Disabling bearer <%s>\n", b->name); pr_info("Disabling bearer <%s>\n", b->name);
b->media->disable_media(b); clear_bit_unlock(0, &b->up);
tipc_node_delete_links(net, bearer_id); tipc_node_delete_links(net, bearer_id);
b->media->disable_media(b);
RCU_INIT_POINTER(b->media_ptr, NULL); RCU_INIT_POINTER(b->media_ptr, NULL);
if (b->link_req) if (b->link_req)
tipc_disc_delete(b->link_req); tipc_disc_delete(b->link_req);
...@@ -440,22 +458,16 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, ...@@ -440,22 +458,16 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
{ {
struct net_device *dev; struct net_device *dev;
int delta; int delta;
void *tipc_ptr;
dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr); dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
if (!dev) if (!dev)
return 0; return 0;
/* Send RESET message even if bearer is detached from device */ delta = SKB_DATA_ALIGN(dev->hard_header_len - skb_headroom(skb));
tipc_ptr = rcu_dereference_rtnl(dev->tipc_ptr); if ((delta > 0) && pskb_expand_head(skb, delta, 0, GFP_ATOMIC)) {
if (unlikely(!tipc_ptr && !msg_is_reset(buf_msg(skb)))) kfree_skb(skb);
goto drop; return 0;
}
delta = dev->hard_header_len - skb_headroom(skb);
if ((delta > 0) &&
pskb_expand_head(skb, SKB_DATA_ALIGN(delta), 0, GFP_ATOMIC))
goto drop;
skb_reset_network_header(skb); skb_reset_network_header(skb);
skb->dev = dev; skb->dev = dev;
skb->protocol = htons(ETH_P_TIPC); skb->protocol = htons(ETH_P_TIPC);
...@@ -463,9 +475,6 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb, ...@@ -463,9 +475,6 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
dev->dev_addr, skb->len); dev->dev_addr, skb->len);
dev_queue_xmit(skb); dev_queue_xmit(skb);
return 0; return 0;
drop:
kfree_skb(skb);
return 0;
} }
int tipc_bearer_mtu(struct net *net, u32 bearer_id) int tipc_bearer_mtu(struct net *net, u32 bearer_id)
...@@ -487,12 +496,12 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id, ...@@ -487,12 +496,12 @@ void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
struct sk_buff *skb, struct sk_buff *skb,
struct tipc_media_addr *dest) struct tipc_media_addr *dest)
{ {
struct tipc_net *tn = tipc_net(net); struct tipc_msg *hdr = buf_msg(skb);
struct tipc_bearer *b; struct tipc_bearer *b;
rcu_read_lock(); rcu_read_lock();
b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); b = bearer_get(net, bearer_id);
if (likely(b)) if (likely(b && (test_bit(0, &b->up) || msg_is_reset(hdr))))
b->media->send_msg(net, skb, b, dest); b->media->send_msg(net, skb, b, dest);
else else
kfree_skb(skb); kfree_skb(skb);
...@@ -505,7 +514,6 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id, ...@@ -505,7 +514,6 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
struct sk_buff_head *xmitq, struct sk_buff_head *xmitq,
struct tipc_media_addr *dst) struct tipc_media_addr *dst)
{ {
struct tipc_net *tn = net_generic(net, tipc_net_id);
struct tipc_bearer *b; struct tipc_bearer *b;
struct sk_buff *skb, *tmp; struct sk_buff *skb, *tmp;
...@@ -513,12 +521,15 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id, ...@@ -513,12 +521,15 @@ void tipc_bearer_xmit(struct net *net, u32 bearer_id,
return; return;
rcu_read_lock(); rcu_read_lock();
b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); b = bearer_get(net, bearer_id);
if (unlikely(!b)) if (unlikely(!b))
__skb_queue_purge(xmitq); __skb_queue_purge(xmitq);
skb_queue_walk_safe(xmitq, skb, tmp) { skb_queue_walk_safe(xmitq, skb, tmp) {
__skb_dequeue(xmitq); __skb_dequeue(xmitq);
if (likely(test_bit(0, &b->up) || msg_is_reset(buf_msg(skb))))
b->media->send_msg(net, skb, b, dst); b->media->send_msg(net, skb, b, dst);
else
kfree(skb);
} }
rcu_read_unlock(); rcu_read_unlock();
} }
...@@ -535,8 +546,8 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id, ...@@ -535,8 +546,8 @@ void tipc_bearer_bc_xmit(struct net *net, u32 bearer_id,
struct tipc_msg *hdr; struct tipc_msg *hdr;
rcu_read_lock(); rcu_read_lock();
b = rcu_dereference_rtnl(tn->bearer_list[bearer_id]); b = bearer_get(net, bearer_id);
if (unlikely(!b)) if (unlikely(!b || !test_bit(0, &b->up)))
__skb_queue_purge(xmitq); __skb_queue_purge(xmitq);
skb_queue_walk_safe(xmitq, skb, tmp) { skb_queue_walk_safe(xmitq, skb, tmp) {
hdr = buf_msg(skb); hdr = buf_msg(skb);
...@@ -566,7 +577,8 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev, ...@@ -566,7 +577,8 @@ static int tipc_l2_rcv_msg(struct sk_buff *skb, struct net_device *dev,
rcu_read_lock(); rcu_read_lock();
b = rcu_dereference_rtnl(dev->tipc_ptr); b = rcu_dereference_rtnl(dev->tipc_ptr);
if (likely(b && (skb->pkt_type <= PACKET_BROADCAST))) { if (likely(b && test_bit(0, &b->up) &&
(skb->pkt_type <= PACKET_BROADCAST))) {
skb->next = NULL; skb->next = NULL;
tipc_rcv(dev_net(dev), skb, b); tipc_rcv(dev_net(dev), skb, b);
rcu_read_unlock(); rcu_read_unlock();
...@@ -591,18 +603,9 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, ...@@ -591,18 +603,9 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
{ {
struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net_device *dev = netdev_notifier_info_to_dev(ptr);
struct net *net = dev_net(dev); struct net *net = dev_net(dev);
struct tipc_net *tn = tipc_net(net);
struct tipc_bearer *b; struct tipc_bearer *b;
int i;
b = rtnl_dereference(dev->tipc_ptr); b = rtnl_dereference(dev->tipc_ptr);
if (!b) {
for (i = 0; i < MAX_BEARERS; b = NULL, i++) {
b = rtnl_dereference(tn->bearer_list[i]);
if (b && (b->media_ptr == dev))
break;
}
}
if (!b) if (!b)
return NOTIFY_DONE; return NOTIFY_DONE;
...@@ -613,11 +616,10 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, ...@@ -613,11 +616,10 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
if (netif_carrier_ok(dev)) if (netif_carrier_ok(dev))
break; break;
case NETDEV_UP: case NETDEV_UP:
rcu_assign_pointer(dev->tipc_ptr, b); test_and_set_bit_lock(0, &b->up);
break; break;
case NETDEV_GOING_DOWN: case NETDEV_GOING_DOWN:
RCU_INIT_POINTER(dev->tipc_ptr, NULL); clear_bit_unlock(0, &b->up);
synchronize_net();
tipc_reset_bearer(net, b); tipc_reset_bearer(net, b);
break; break;
case NETDEV_CHANGEMTU: case NETDEV_CHANGEMTU:
......
...@@ -150,6 +150,7 @@ struct tipc_bearer { ...@@ -150,6 +150,7 @@ struct tipc_bearer {
u32 identity; u32 identity;
struct tipc_link_req *link_req; struct tipc_link_req *link_req;
char net_plane; char net_plane;
unsigned long up;
}; };
struct tipc_bearer_names { struct tipc_bearer_names {
......
...@@ -807,7 +807,7 @@ void link_prepare_wakeup(struct tipc_link *l) ...@@ -807,7 +807,7 @@ void link_prepare_wakeup(struct tipc_link *l)
skb_queue_walk_safe(&l->wakeupq, skb, tmp) { skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
imp = TIPC_SKB_CB(skb)->chain_imp; imp = TIPC_SKB_CB(skb)->chain_imp;
lim = l->window + l->backlog[imp].limit; lim = l->backlog[imp].limit;
pnd[imp] += TIPC_SKB_CB(skb)->chain_sz; pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
if ((pnd[imp] + l->backlog[imp].len) >= lim) if ((pnd[imp] + l->backlog[imp].len) >= lim)
break; break;
...@@ -873,10 +873,12 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list, ...@@ -873,10 +873,12 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
struct sk_buff *skb, *_skb, *bskb; struct sk_buff *skb, *_skb, *bskb;
/* Match msg importance against this and all higher backlog limits: */ /* Match msg importance against this and all higher backlog limits: */
if (!skb_queue_empty(backlogq)) {
for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
if (unlikely(l->backlog[i].len >= l->backlog[i].limit)) if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
return link_schedule_user(l, list); return link_schedule_user(l, list);
} }
}
if (unlikely(msg_size(hdr) > mtu)) { if (unlikely(msg_size(hdr) > mtu)) {
skb_queue_purge(list); skb_queue_purge(list);
return -EMSGSIZE; return -EMSGSIZE;
...@@ -1692,10 +1694,10 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) ...@@ -1692,10 +1694,10 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
l->window = win; l->window = win;
l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win; l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3; l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2; l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
} }
......
...@@ -224,7 +224,7 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb) ...@@ -224,7 +224,7 @@ static int tipc_udp_recv(struct sock *sk, struct sk_buff *skb)
rcu_read_lock(); rcu_read_lock();
b = rcu_dereference_rtnl(ub->bearer); b = rcu_dereference_rtnl(ub->bearer);
if (b) { if (b && test_bit(0, &b->up)) {
tipc_rcv(sock_net(sk), skb, b); tipc_rcv(sock_net(sk), skb, b);
rcu_read_unlock(); rcu_read_unlock();
return 0; return 0;
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册