Commit 23d8335d authored by Jon Paul Maloy, committed by David S. Miller

tipc: remove implicit message delivery in node_unlock()

After the most recent changes, all access calls to a link which
may entail addition of messages to the link's input queue are
followed by an explicit call to tipc_sk_rcv(), using a reference
to the correct queue.

This means that the potentially hazardous implicit delivery, using
tipc_node_unlock() in combination with a binary flag and a cached
queue pointer, has now become redundant.

This commit removes this implicit delivery mechanism both for regular
data messages and for binding table update messages.
Tested-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 598411d7
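For orientation, here is a minimal user-space sketch of the delivery pattern this commit converges on: messages are appended to the link-level input queue while the node lock is held, and delivery then happens through an explicit, unconditional call after unlock, rather than being piggybacked on node_unlock() via a flag and a cached queue pointer. All identifiers below (node_sketch, msg_queue, deliver_inputq, rcv, ...) are illustrative stand-ins, not the real TIPC symbols; in the kernel the queue is a per-link sk_buff_head and delivery is done by tipc_sk_rcv().

/* Illustrative user-space sketch only -- not TIPC kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct msg {
	struct msg *next;
	int payload;
};

struct msg_queue {
	struct msg *head;
	struct msg *tail;
};

struct node_sketch {
	pthread_mutex_t lock;		/* plays the role of the node lock */
	struct msg_queue inputq;	/* per-link input queue in real TIPC */
};

static void queue_tail(struct msg_queue *q, struct msg *m)
{
	m->next = NULL;
	if (q->tail)
		q->tail->next = m;
	else
		q->head = m;
	q->tail = m;
}

/* Stand-in for tipc_sk_rcv(): drain the queue and hand each message on */
static void deliver_inputq(struct msg_queue *q)
{
	struct msg *m;

	while ((m = q->head) != NULL) {
		q->head = m->next;
		if (!q->head)
			q->tail = NULL;
		printf("delivered payload %d\n", m->payload);
		free(m);
	}
}

/* Receive path: enqueue under the lock, deliver explicitly after unlock.
 * (Single-threaded demo; the real inputq is an sk_buff_head with its own
 * spinlock, which is why post-unlock delivery remains safe in the kernel.)
 */
static void rcv(struct node_sketch *n, int payload)
{
	struct msg *m = malloc(sizeof(*m));

	if (!m)
		return;
	m->payload = payload;
	pthread_mutex_lock(&n->lock);
	queue_tail(&n->inputq, m);	/* link-level processing */
	pthread_mutex_unlock(&n->lock);	/* plain unlock, no side effects */
	deliver_inputq(&n->inputq);	/* explicit, unconditional delivery */
}

int main(void)
{
	struct node_sketch n = { .lock = PTHREAD_MUTEX_INITIALIZER };

	rcv(&n, 1);
	rcv(&n, 2);
	return 0;
}

The point mirrored from the commit: the unlock (here pthread_mutex_unlock()) does nothing but unlock; delivery is always a separate, visible step on the receive path.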
@@ -559,8 +559,6 @@ void link_prepare_wakeup(struct tipc_link *l)
 			break;
 		skb_unlink(skb, &l->wakeupq);
 		skb_queue_tail(l->inputq, skb);
-		l->owner->inputq = l->inputq;
-		l->owner->action_flags |= TIPC_MSG_EVT;
 	}
 }
 
@@ -598,8 +596,6 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
 
 void tipc_link_reset(struct tipc_link *l)
 {
-	struct tipc_node *owner = l->owner;
-
 	tipc_link_fsm_evt(l, LINK_RESET_EVT);
 
 	/* Link is down, accept any session */
@@ -611,14 +607,10 @@ void tipc_link_reset(struct tipc_link *l)
 	/* Prepare for renewed mtu size negotiation */
 	l->mtu = l->advertised_mtu;
 
-	/* Clean up all queues, except inputq: */
+	/* Clean up all queues: */
 	__skb_queue_purge(&l->transmq);
 	__skb_queue_purge(&l->deferdq);
-	if (!owner->inputq)
-		owner->inputq = l->inputq;
-	skb_queue_splice_init(&l->wakeupq, owner->inputq);
-	if (!skb_queue_empty(owner->inputq))
-		owner->action_flags |= TIPC_MSG_EVT;
+	skb_queue_splice_init(&l->wakeupq, l->inputq);
 
 	tipc_link_purge_backlog(l);
 	kfree_skb(l->reasm_buf);
@@ -972,7 +964,6 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
 {
 	struct tipc_node *node = link->owner;
 	struct tipc_msg *msg = buf_msg(skb);
-	u32 dport = msg_destport(msg);
 
 	switch (msg_user(msg)) {
 	case TIPC_LOW_IMPORTANCE:
@@ -980,17 +971,11 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
 	case TIPC_HIGH_IMPORTANCE:
 	case TIPC_CRITICAL_IMPORTANCE:
 	case CONN_MANAGER:
-		if (tipc_skb_queue_tail(link->inputq, skb, dport)) {
-			node->inputq = link->inputq;
-			node->action_flags |= TIPC_MSG_EVT;
-		}
+		skb_queue_tail(link->inputq, skb);
 		return true;
 	case NAME_DISTRIBUTOR:
 		node->bclink.recv_permitted = true;
-		node->namedq = link->namedq;
 		skb_queue_tail(link->namedq, skb);
-		if (skb_queue_len(link->namedq) == 1)
-			node->action_flags |= TIPC_NAMED_MSG_EVT;
 		return true;
 	case MSG_BUNDLER:
 	case TUNNEL_PROTOCOL:
......
@@ -862,28 +862,6 @@ static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
 	return skb;
 }
 
-/* tipc_skb_queue_tail(): add buffer to tail of list;
- * @list: list to be appended to
- * @skb: buffer to append. Always appended
- * @dport: the destination port of the buffer
- * returns true if dport differs from previous destination
- */
-static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
-				       struct sk_buff *skb, u32 dport)
-{
-	struct sk_buff *_skb = NULL;
-	bool rv = false;
-
-	spin_lock_bh(&list->lock);
-	_skb = skb_peek_tail(list);
-	if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
-	    (skb_queue_len(list) > 32))
-		rv = true;
-	__skb_queue_tail(list, skb);
-	spin_unlock_bh(&list->lock);
-	return rv;
-}
-
 /* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
  * @list: list to be appended to
  * @skb: buffer to add
......
@@ -873,10 +873,8 @@ static void node_lost_contact(struct tipc_node *n_ptr,
 				      SHORT_H_SIZE, 0, tn->own_addr,
 				      conn->peer_node, conn->port,
 				      conn->peer_port, TIPC_ERR_NO_NODE);
-		if (likely(skb)) {
+		if (likely(skb))
 			skb_queue_tail(inputq, skb);
-			n_ptr->action_flags |= TIPC_MSG_EVT;
-		}
 		list_del(&conn->list);
 		kfree(conn);
 	}
@@ -923,27 +921,20 @@ void tipc_node_unlock(struct tipc_node *node)
 	u32 flags = node->action_flags;
 	u32 link_id = 0;
 	struct list_head *publ_list;
-	struct sk_buff_head *inputq = node->inputq;
-	struct sk_buff_head *namedq;
 
-	if (likely(!flags || (flags == TIPC_MSG_EVT))) {
-		node->action_flags = 0;
+	if (likely(!flags)) {
 		spin_unlock_bh(&node->lock);
-		if (flags == TIPC_MSG_EVT)
-			tipc_sk_rcv(net, inputq);
 		return;
 	}
 
 	addr = node->addr;
 	link_id = node->link_id;
-	namedq = node->namedq;
 	publ_list = &node->publ_list;
 
-	node->action_flags &= ~(TIPC_MSG_EVT |
-				TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
+	node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
 				TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
 				TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
-				TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
+				TIPC_BCAST_RESET);
 
 	spin_unlock_bh(&node->lock);
 
@@ -964,12 +955,6 @@ void tipc_node_unlock(struct tipc_node *node)
 		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
 				      link_id, addr);
 
-	if (flags & TIPC_MSG_EVT)
-		tipc_sk_rcv(net, inputq);
-
-	if (flags & TIPC_NAMED_MSG_EVT)
-		tipc_named_rcv(net, namedq);
-
 	if (flags & TIPC_BCAST_MSG_EVT)
 		tipc_bclink_input(net);
 
@@ -1270,6 +1255,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
 	if (unlikely(rc & TIPC_LINK_DOWN_EVT))
 		tipc_node_link_down(n, bearer_id, false);
 
+	if (unlikely(!skb_queue_empty(&n->bclink.namedq)))
+		tipc_named_rcv(net, &n->bclink.namedq);
+
 	if (!skb_queue_empty(&le->inputq))
 		tipc_sk_rcv(net, &le->inputq);
 
......
@@ -53,13 +53,11 @@
  * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
  */
 enum {
-	TIPC_MSG_EVT = 1,
 	TIPC_NOTIFY_NODE_DOWN = (1 << 3),
 	TIPC_NOTIFY_NODE_UP = (1 << 4),
 	TIPC_WAKEUP_BCAST_USERS = (1 << 5),
 	TIPC_NOTIFY_LINK_UP = (1 << 6),
 	TIPC_NOTIFY_LINK_DOWN = (1 << 7),
-	TIPC_NAMED_MSG_EVT = (1 << 8),
 	TIPC_BCAST_MSG_EVT = (1 << 9),
 	TIPC_BCAST_RESET = (1 << 10)
 };
@@ -124,8 +122,6 @@ struct tipc_node {
 	spinlock_t lock;
 	struct net *net;
 	struct hlist_node hash;
-	struct sk_buff_head *inputq;
-	struct sk_buff_head *namedq;
 	int active_links[2];
 	struct tipc_link_entry links[MAX_BEARERS];
 	int action_flags;
......