Commit a6ca1094 authored by Ying Xue, committed by David S. Miller

tipc: use generic SKB list APIs to manage TIPC outgoing packet chains

Use the standard SKB list APIs associated with struct sk_buff_head to
manage the socket outgoing packet chain and the name table outgoing
packet chain, making the relevant code simpler and more readable.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent f03273f1
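For context, the pattern this patch switches to is the generic struct sk_buff_head queue with its unlocked helpers. The sketch below is illustrative only and not part of the patch: the function names example_send/example_xmit_chain and the xmit_one callback are hypothetical, while the list primitives (__skb_queue_head_init, __skb_queue_tail, skb_queue_walk_safe, __skb_unlink, __skb_queue_purge) are the generic kernel APIs adopted throughout the diff that follows.

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Illustrative sketch: walk an sk_buff_head safely so entries can be
 * unlinked during iteration, and purge whatever remains on failure.
 */
static int example_xmit_chain(struct sk_buff_head *list,
                              int (*xmit_one)(struct sk_buff *skb))
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(list, skb, tmp) {
                __skb_unlink(skb, list);
                if (xmit_one(skb)) {
                        kfree_skb(skb);          /* drop the current buffer */
                        __skb_queue_purge(list); /* and the rest of the chain */
                        return -EHOSTUNREACH;
                }
        }
        return 0;
}

/* Illustrative caller: build an on-stack queue and hand it off. */
static int example_send(struct sk_buff *skb,
                        int (*xmit_one)(struct sk_buff *skb))
{
        struct sk_buff_head head;

        __skb_queue_head_init(&head);  /* on-stack queue, no locking needed */
        __skb_queue_tail(&head, skb);  /* append buffers in send order */
        return example_xmit_chain(&head, xmit_one);
}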
@@ -398,20 +398,20 @@ static void bclink_peek_nack(struct tipc_msg *msg)
 /* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
  * and to identified node local sockets
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_bclink_xmit(struct sk_buff *buf)
+int tipc_bclink_xmit(struct sk_buff_head *list)
 {
         int rc = 0;
         int bc = 0;
-        struct sk_buff *clbuf;
+        struct sk_buff *skb;
         /* Prepare clone of message for local node */
-        clbuf = tipc_msg_reassemble(buf);
-        if (unlikely(!clbuf)) {
-                kfree_skb_list(buf);
+        skb = tipc_msg_reassemble(list);
+        if (unlikely(!skb)) {
+                __skb_queue_purge(list);
                 return -EHOSTUNREACH;
         }
@@ -419,7 +419,7 @@ int tipc_bclink_xmit(struct sk_buff *buf)
         if (likely(bclink)) {
                 tipc_bclink_lock();
                 if (likely(bclink->bcast_nodes.count)) {
-                        rc = __tipc_link_xmit(bcl, buf);
+                        rc = __tipc_link_xmit(bcl, list);
                         if (likely(!rc)) {
                                 u32 len = skb_queue_len(&bcl->outqueue);
@@ -433,13 +433,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
         }
         if (unlikely(!bc))
-                kfree_skb_list(buf);
+                __skb_queue_purge(list);
         /* Deliver message clone */
         if (likely(!rc))
-                tipc_sk_mcast_rcv(clbuf);
+                tipc_sk_mcast_rcv(skb);
         else
-                kfree_skb(clbuf);
+                kfree_skb(skb);
         return rc;
 }
...
@@ -100,7 +100,7 @@ int tipc_bclink_reset_stats(void);
 int tipc_bclink_set_queue_limits(u32 limit);
 void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
 uint tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct sk_buff *buf);
+int tipc_bclink_xmit(struct sk_buff_head *list);
 void tipc_bclink_wakeup_users(void);
 int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
...
@@ -664,9 +664,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
  * - For all other messages we discard the buffer and return -EHOSTUNREACH
  * - For TIPC internal messages we also reset the link
  */
-static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
+static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
 {
-        struct tipc_msg *msg = buf_msg(buf);
+        struct sk_buff *skb = skb_peek(list);
+        struct tipc_msg *msg = buf_msg(skb);
         uint imp = tipc_msg_tot_importance(msg);
         u32 oport = msg_tot_origport(msg);
@@ -679,28 +680,29 @@ static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
                 goto drop;
         if (unlikely(msg_reroute_cnt(msg)))
                 goto drop;
-        if (TIPC_SKB_CB(buf)->wakeup_pending)
+        if (TIPC_SKB_CB(skb)->wakeup_pending)
                 return -ELINKCONG;
-        if (link_schedule_user(link, oport, TIPC_SKB_CB(buf)->chain_sz, imp))
+        if (link_schedule_user(link, oport, skb_queue_len(list), imp))
                 return -ELINKCONG;
 drop:
-        kfree_skb_list(buf);
+        __skb_queue_purge(list);
         return -EHOSTUNREACH;
 }
 /**
  * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
  * @link: link to use
- * @skb: chain of buffers containing message
+ * @list: chain of buffers containing message
+ *
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
  * user data messages) or -EHOSTUNREACH (all other messages/senders)
  * Only the socket functions tipc_send_stream() and tipc_send_packet() need
  * to act on the return value, since they may need to do more send attempts.
  */
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb)
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
 {
-        struct tipc_msg *msg = buf_msg(skb);
+        struct tipc_msg *msg = buf_msg(skb_peek(list));
         uint psz = msg_size(msg);
         uint sndlim = link->queue_limit[0];
         uint imp = tipc_msg_tot_importance(msg);
@@ -710,21 +712,21 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb)
         uint bc_last_in = link->owner->bclink.last_in;
         struct tipc_media_addr *addr = &link->media_addr;
         struct sk_buff_head *outqueue = &link->outqueue;
-        struct sk_buff *next;
+        struct sk_buff *skb, *tmp;
         /* Match queue limits against msg importance: */
         if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
-                return tipc_link_cong(link, skb);
+                return tipc_link_cong(link, list);
         /* Has valid packet limit been used ? */
         if (unlikely(psz > mtu)) {
-                kfree_skb_list(skb);
+                __skb_queue_purge(list);
                 return -EMSGSIZE;
         }
         /* Prepare each packet for sending, and add to outqueue: */
-        while (skb) {
-                next = skb->next;
+        skb_queue_walk_safe(list, skb, tmp) {
+                __skb_unlink(skb, list);
                 msg = buf_msg(skb);
                 msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
                 msg_set_bcast_ack(msg, bc_last_in);
@@ -736,7 +738,6 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb)
                         link->unacked_window = 0;
                 } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
                         link->stats.sent_bundled++;
-                        skb = next;
                         continue;
                 } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
                                                 link->addr)) {
@@ -750,22 +751,43 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *skb)
                         link->next_out = skb;
                 }
                 seqno++;
-                skb = next;
         }
         link->next_out_no = seqno;
         return 0;
 }
+static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
+{
+        __skb_queue_head_init(list);
+        __skb_queue_tail(list, skb);
+}
+static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
+{
+        struct sk_buff_head head;
+        skb2list(skb, &head);
+        return __tipc_link_xmit(link, &head);
+}
+int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
+{
+        struct sk_buff_head head;
+        skb2list(skb, &head);
+        return tipc_link_xmit(&head, dnode, selector);
+}
 /**
  * tipc_link_xmit() is the general link level function for message sending
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
  * @dsz: amount of user data to be sent
  * @dnode: address of destination node
  * @selector: a number used for deterministic link selection
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
+int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
 {
         struct tipc_link *link = NULL;
         struct tipc_node *node;
@@ -776,17 +798,22 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
                 tipc_node_lock(node);
                 link = node->active_links[selector & 1];
                 if (link)
-                        rc = __tipc_link_xmit(link, buf);
+                        rc = __tipc_link_xmit(link, list);
                 tipc_node_unlock(node);
         }
         if (link)
                 return rc;
-        if (likely(in_own_node(dnode)))
-                return tipc_sk_rcv(buf);
-        kfree_skb_list(buf);
+        if (likely(in_own_node(dnode))) {
+                /* As a node local message chain never contains more than one
+                 * buffer, we just need to dequeue one SKB buffer from the
+                 * head list.
+                 */
+                return tipc_sk_rcv(__skb_dequeue(list));
+        }
+        __skb_queue_purge(list);
         return rc;
 }
@@ -800,17 +827,17 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
  */
 static void tipc_link_sync_xmit(struct tipc_link *link)
 {
-        struct sk_buff *buf;
+        struct sk_buff *skb;
         struct tipc_msg *msg;
-        buf = tipc_buf_acquire(INT_H_SIZE);
-        if (!buf)
+        skb = tipc_buf_acquire(INT_H_SIZE);
+        if (!skb)
                 return;
-        msg = buf_msg(buf);
+        msg = buf_msg(skb);
         tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
         msg_set_last_bcast(msg, link->owner->bclink.acked);
-        __tipc_link_xmit(link, buf);
+        __tipc_link_xmit_skb(link, skb);
 }
 /*
@@ -1053,8 +1080,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
         u32 ackd;
         u32 released;
-        __skb_queue_head_init(&head);
-        __skb_queue_tail(&head, skb);
+        skb2list(skb, &head);
         while ((skb = __skb_dequeue(&head))) {
                 /* Ensure message is well-formed */
@@ -1573,7 +1599,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
                                   u32 selector)
 {
         struct tipc_link *tunnel;
-        struct sk_buff *buf;
+        struct sk_buff *skb;
         u32 length = msg_size(msg);
         tunnel = l_ptr->owner->active_links[selector & 1];
@@ -1582,14 +1608,14 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
                 return;
         }
         msg_set_size(tunnel_hdr, length + INT_H_SIZE);
-        buf = tipc_buf_acquire(length + INT_H_SIZE);
-        if (!buf) {
+        skb = tipc_buf_acquire(length + INT_H_SIZE);
+        if (!skb) {
                 pr_warn("%sunable to send tunnel msg\n", link_co_err);
                 return;
         }
-        skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
-        skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
-        __tipc_link_xmit(tunnel, buf);
+        skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
+        skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
+        __tipc_link_xmit_skb(tunnel, skb);
 }
@@ -1620,7 +1646,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
                 if (skb) {
                         skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
                         msg_set_size(&tunnel_hdr, INT_H_SIZE);
-                        __tipc_link_xmit(tunnel, skb);
+                        __tipc_link_xmit_skb(tunnel, skb);
                 } else {
                         pr_warn("%sunable to send changeover msg\n",
                                 link_co_err);
@@ -1691,7 +1717,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
                 skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
                 skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
                                                length);
-                __tipc_link_xmit(tunnel, outskb);
+                __tipc_link_xmit_skb(tunnel, outskb);
                 if (!tipc_link_is_up(l_ptr))
                         return;
         }
...
@@ -213,8 +213,9 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
 void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
 void tipc_link_reset_list(unsigned int bearer_id);
-int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf);
+int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector);
+int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector);
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
 void tipc_link_bundle_rcv(struct sk_buff *buf);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
...
@@ -166,11 +166,12 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
  * @offset: Posision in iov to start copying from
  * @dsz: Total length of user data
  * @pktmax: Max packet size that can be used
- * @chain: Buffer or chain of buffers to be returned to caller
+ * @list: Buffer or chain of buffers to be returned to caller
+ *
  * Returns message data size or errno: -ENOMEM, -EFAULT
  */
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
-                   int offset, int dsz, int pktmax , struct sk_buff **chain)
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+                   int dsz, int pktmax, struct sk_buff_head *list)
 {
         int mhsz = msg_hdr_sz(mhdr);
         int msz = mhsz + dsz;
@@ -179,22 +180,22 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
         int pktrem = pktmax;
         int drem = dsz;
         struct tipc_msg pkthdr;
-        struct sk_buff *buf, *prev;
+        struct sk_buff *skb;
         char *pktpos;
         int rc;
-        uint chain_sz = 0;
         msg_set_size(mhdr, msz);
         /* No fragmentation needed? */
         if (likely(msz <= pktmax)) {
-                buf = tipc_buf_acquire(msz);
-                *chain = buf;
-                if (unlikely(!buf))
+                skb = tipc_buf_acquire(msz);
+                if (unlikely(!skb))
                         return -ENOMEM;
-                skb_copy_to_linear_data(buf, mhdr, mhsz);
-                pktpos = buf->data + mhsz;
-                TIPC_SKB_CB(buf)->chain_sz = 1;
-                if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iov, offset, dsz))
+                __skb_queue_tail(list, skb);
+                skb_copy_to_linear_data(skb, mhdr, mhsz);
+                pktpos = skb->data + mhsz;
+                if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iov, offset,
+                                                 dsz))
                         return dsz;
                 rc = -EFAULT;
                 goto error;
@@ -207,15 +208,15 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
         msg_set_fragm_no(&pkthdr, pktno);
         /* Prepare first fragment */
-        *chain = buf = tipc_buf_acquire(pktmax);
-        if (!buf)
+        skb = tipc_buf_acquire(pktmax);
+        if (!skb)
                 return -ENOMEM;
-        chain_sz = 1;
-        pktpos = buf->data;
-        skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
+        __skb_queue_tail(list, skb);
+        pktpos = skb->data;
+        skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
         pktpos += INT_H_SIZE;
         pktrem -= INT_H_SIZE;
-        skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz);
+        skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
         pktpos += mhsz;
         pktrem -= mhsz;
@@ -238,28 +239,25 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
                         pktsz = drem + INT_H_SIZE;
                 else
                         pktsz = pktmax;
-                prev = buf;
-                buf = tipc_buf_acquire(pktsz);
-                if (!buf) {
+                skb = tipc_buf_acquire(pktsz);
+                if (!skb) {
                         rc = -ENOMEM;
                         goto error;
                 }
-                chain_sz++;
-                prev->next = buf;
+                __skb_queue_tail(list, skb);
                 msg_set_type(&pkthdr, FRAGMENT);
                 msg_set_size(&pkthdr, pktsz);
                 msg_set_fragm_no(&pkthdr, ++pktno);
-                skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
-                pktpos = buf->data + INT_H_SIZE;
+                skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
+                pktpos = skb->data + INT_H_SIZE;
                 pktrem = pktsz - INT_H_SIZE;
         } while (1);
-        TIPC_SKB_CB(*chain)->chain_sz = chain_sz;
-        msg_set_type(buf_msg(buf), LAST_FRAGMENT);
+        msg_set_type(buf_msg(skb), LAST_FRAGMENT);
         return dsz;
 error:
-        kfree_skb_list(*chain);
-        *chain = NULL;
+        __skb_queue_purge(list);
+        __skb_queue_head_init(list);
         return rc;
 }
@@ -430,22 +428,23 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
 /* tipc_msg_reassemble() - clone a buffer chain of fragments and
  * reassemble the clones into one message
  */
-struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
+struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
 {
-        struct sk_buff *buf = chain;
-        struct sk_buff *frag = buf;
+        struct sk_buff *skb;
+        struct sk_buff *frag = NULL;
         struct sk_buff *head = NULL;
         int hdr_sz;
         /* Copy header if single buffer */
-        if (!buf->next) {
-                hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf));
-                return __pskb_copy(buf, hdr_sz, GFP_ATOMIC);
+        if (skb_queue_len(list) == 1) {
+                skb = skb_peek(list);
+                hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
+                return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
         }
         /* Clone all fragments and reassemble */
-        while (buf) {
-                frag = skb_clone(buf, GFP_ATOMIC);
+        skb_queue_walk(list, skb) {
+                frag = skb_clone(skb, GFP_ATOMIC);
                 if (!frag)
                         goto error;
                 frag->next = NULL;
@@ -453,7 +452,6 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
                         break;
                 if (!head)
                         goto error;
-                buf = buf->next;
         }
         return frag;
 error:
...
@@ -739,9 +739,9 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
 bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
                           u32 mtu, u32 dnode);
-int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
-                   int offset, int dsz, int mtu , struct sk_buff **chain);
-struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain);
+int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
+                   int dsz, int mtu, struct sk_buff_head *list);
+struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
 #endif
@@ -114,9 +114,9 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
         return buf;
 }
-void named_cluster_distribute(struct sk_buff *buf)
+void named_cluster_distribute(struct sk_buff *skb)
 {
-        struct sk_buff *obuf;
+        struct sk_buff *oskb;
         struct tipc_node *node;
         u32 dnode;
@@ -127,15 +127,15 @@ void named_cluster_distribute(struct sk_buff *buf)
                         continue;
                 if (!tipc_node_active_links(node))
                         continue;
-                obuf = skb_copy(buf, GFP_ATOMIC);
-                if (!obuf)
+                oskb = skb_copy(skb, GFP_ATOMIC);
+                if (!oskb)
                         break;
-                msg_set_destnode(buf_msg(obuf), dnode);
-                tipc_link_xmit(obuf, dnode, dnode);
+                msg_set_destnode(buf_msg(oskb), dnode);
+                tipc_link_xmit_skb(oskb, dnode, dnode);
         }
         rcu_read_unlock();
-        kfree_skb(buf);
+        kfree_skb(skb);
 }
 /**
@@ -190,15 +190,15 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
 /**
  * named_distribute - prepare name info for bulk distribution to another node
- * @msg_list: list of messages (buffers) to be returned from this function
+ * @list: list of messages (buffers) to be returned from this function
  * @dnode: node to be updated
  * @pls: linked list of publication items to be packed into buffer chain
  */
-static void named_distribute(struct list_head *msg_list, u32 dnode,
+static void named_distribute(struct sk_buff_head *list, u32 dnode,
                              struct publ_list *pls)
 {
         struct publication *publ;
-        struct sk_buff *buf = NULL;
+        struct sk_buff *skb = NULL;
         struct distr_item *item = NULL;
         uint dsz = pls->size * ITEM_SIZE;
         uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
@@ -207,15 +207,15 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
         list_for_each_entry(publ, &pls->list, local_list) {
                 /* Prepare next buffer: */
-                if (!buf) {
+                if (!skb) {
                         msg_rem = min_t(uint, rem, msg_dsz);
                         rem -= msg_rem;
-                        buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
-                        if (!buf) {
+                        skb = named_prepare_buf(PUBLICATION, msg_rem, dnode);
+                        if (!skb) {
                                 pr_warn("Bulk publication failure\n");
                                 return;
                         }
-                        item = (struct distr_item *)msg_data(buf_msg(buf));
+                        item = (struct distr_item *)msg_data(buf_msg(skb));
                 }
                 /* Pack publication into message: */
@@ -225,8 +225,8 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
                 /* Append full buffer to list: */
                 if (!msg_rem) {
-                        list_add_tail((struct list_head *)buf, msg_list);
-                        buf = NULL;
+                        __skb_queue_tail(list, skb);
+                        skb = NULL;
                 }
         }
 }
@@ -236,18 +236,16 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
  */
 void tipc_named_node_up(u32 dnode)
 {
-        LIST_HEAD(msg_list);
-        struct sk_buff *buf_chain;
+        struct sk_buff_head head;
+        __skb_queue_head_init(&head);
         read_lock_bh(&tipc_nametbl_lock);
-        named_distribute(&msg_list, dnode, &publ_cluster);
-        named_distribute(&msg_list, dnode, &publ_zone);
+        named_distribute(&head, dnode, &publ_cluster);
+        named_distribute(&head, dnode, &publ_zone);
         read_unlock_bh(&tipc_nametbl_lock);
-        /* Convert circular list to linear list and send: */
-        buf_chain = (struct sk_buff *)msg_list.next;
-        ((struct sk_buff *)msg_list.prev)->next = NULL;
-        tipc_link_xmit(buf_chain, dnode, dnode);
+        tipc_link_xmit(&head, dnode, dnode);
 }
 static void tipc_publ_subscribe(struct publication *publ, u32 addr)
...
@@ -244,12 +244,12 @@ static void tsk_advance_rx_queue(struct sock *sk)
  */
 static void tsk_rej_rx_queue(struct sock *sk)
 {
-        struct sk_buff *buf;
+        struct sk_buff *skb;
         u32 dnode;
-        while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
-                if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
-                        tipc_link_xmit(buf, dnode, 0);
+        while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
+                if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
+                        tipc_link_xmit_skb(skb, dnode, 0);
         }
 }
@@ -462,7 +462,7 @@ static int tipc_release(struct socket *sock)
 {
         struct sock *sk = sock->sk;
         struct tipc_sock *tsk;
-        struct sk_buff *buf;
+        struct sk_buff *skb;
         u32 dnode;
         /*
@@ -481,11 +481,11 @@ static int tipc_release(struct socket *sock)
          */
         dnode = tsk_peer_node(tsk);
         while (sock->state != SS_DISCONNECTING) {
-                buf = __skb_dequeue(&sk->sk_receive_queue);
-                if (buf == NULL)
+                skb = __skb_dequeue(&sk->sk_receive_queue);
+                if (skb == NULL)
                         break;
-                if (TIPC_SKB_CB(buf)->handle != NULL)
-                        kfree_skb(buf);
+                if (TIPC_SKB_CB(skb)->handle != NULL)
+                        kfree_skb(skb);
                 else {
                         if ((sock->state == SS_CONNECTING) ||
                             (sock->state == SS_CONNECTED)) {
@@ -493,8 +493,8 @@ static int tipc_release(struct socket *sock)
                                 tsk->connected = 0;
                                 tipc_node_remove_conn(dnode, tsk->ref);
                         }
-                        if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
-                                tipc_link_xmit(buf, dnode, 0);
+                        if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
+                                tipc_link_xmit_skb(skb, dnode, 0);
                 }
         }
@@ -502,12 +502,12 @@ static int tipc_release(struct socket *sock)
         tipc_sk_ref_discard(tsk->ref);
         k_cancel_timer(&tsk->timer);
         if (tsk->connected) {
-                buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
                                       SHORT_H_SIZE, 0, dnode, tipc_own_addr,
                                       tsk_peer_port(tsk),
                                       tsk->ref, TIPC_ERR_NO_PORT);
-                if (buf)
-                        tipc_link_xmit(buf, dnode, tsk->ref);
+                if (skb)
+                        tipc_link_xmit_skb(skb, dnode, tsk->ref);
                 tipc_node_remove_conn(dnode, tsk->ref);
         }
         k_term_timer(&tsk->timer);
@@ -712,7 +712,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
 {
         struct sock *sk = sock->sk;
         struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
-        struct sk_buff *buf;
+        struct sk_buff_head head;
         uint mtu;
         int rc;
@@ -727,12 +727,13 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
 new_mtu:
         mtu = tipc_bclink_get_mtu();
-        rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &buf);
+        __skb_queue_head_init(&head);
+        rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
         if (unlikely(rc < 0))
                 return rc;
         do {
-                rc = tipc_bclink_xmit(buf);
+                rc = tipc_bclink_xmit(&head);
                 if (likely(rc >= 0)) {
                         rc = dsz;
                         break;
@@ -744,7 +745,7 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
                 tipc_sk(sk)->link_cong = 1;
                 rc = tipc_wait_for_sndmsg(sock, &timeo);
                 if (rc)
-                        kfree_skb_list(buf);
+                        __skb_queue_purge(&head);
         } while (!rc);
         return rc;
 }
@@ -906,7 +907,8 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
         struct tipc_sock *tsk = tipc_sk(sk);
         struct tipc_msg *mhdr = &tsk->phdr;
         u32 dnode, dport;
-        struct sk_buff *buf;
+        struct sk_buff_head head;
+        struct sk_buff *skb;
         struct tipc_name_seq *seq = &dest->addr.nameseq;
         u32 mtu;
         long timeo;
@@ -981,13 +983,15 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
 new_mtu:
         mtu = tipc_node_get_mtu(dnode, tsk->ref);
-        rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &buf);
+        __skb_queue_head_init(&head);
+        rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
         if (rc < 0)
                 goto exit;
         do {
-                TIPC_SKB_CB(buf)->wakeup_pending = tsk->link_cong;
-                rc = tipc_link_xmit(buf, dnode, tsk->ref);
+                skb = skb_peek(&head);
+                TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
+                rc = tipc_link_xmit(&head, dnode, tsk->ref);
                 if (likely(rc >= 0)) {
                         if (sock->state != SS_READY)
                                 sock->state = SS_CONNECTING;
@@ -1001,7 +1005,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
                 tsk->link_cong = 1;
                 rc = tipc_wait_for_sndmsg(sock, &timeo);
                 if (rc)
-                        kfree_skb_list(buf);
+                        __skb_queue_purge(&head);
         } while (!rc);
 exit:
         if (iocb)
@@ -1058,7 +1062,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
         struct sock *sk = sock->sk;
         struct tipc_sock *tsk = tipc_sk(sk);
         struct tipc_msg *mhdr = &tsk->phdr;
-        struct sk_buff *buf;
+        struct sk_buff_head head;
         DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
         u32 ref = tsk->ref;
         int rc = -EINVAL;
@@ -1093,12 +1097,13 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
 next:
         mtu = tsk->max_pkt;
         send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
-        rc = tipc_msg_build(mhdr, m, sent, send, mtu, &buf);
+        __skb_queue_head_init(&head);
+        rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
         if (unlikely(rc < 0))
                 goto exit;
         do {
                 if (likely(!tsk_conn_cong(tsk))) {
-                        rc = tipc_link_xmit(buf, dnode, ref);
+                        rc = tipc_link_xmit(&head, dnode, ref);
                         if (likely(!rc)) {
                                 tsk->sent_unacked++;
                                 sent += send;
@@ -1116,7 +1121,7 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
                 }
                 rc = tipc_wait_for_sndpkt(sock, &timeo);
                 if (rc)
-                        kfree_skb_list(buf);
+                        __skb_queue_purge(&head);
         } while (!rc);
 exit:
         if (iocb)
@@ -1261,20 +1266,20 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
 static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
 {
-        struct sk_buff *buf = NULL;
+        struct sk_buff *skb = NULL;
         struct tipc_msg *msg;
         u32 peer_port = tsk_peer_port(tsk);
         u32 dnode = tsk_peer_node(tsk);
         if (!tsk->connected)
                 return;
-        buf = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
+        skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
                               tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
-        if (!buf)
+        if (!skb)
                 return;
-        msg = buf_msg(buf);
+        msg = buf_msg(skb);
         msg_set_msgcnt(msg, ack);
-        tipc_link_xmit(buf, dnode, msg_link_selector(msg));
+        tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
 }
 static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1729,20 +1734,20 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
 /**
  * tipc_backlog_rcv - handle incoming message from backlog queue
  * @sk: socket
- * @buf: message
+ * @skb: message
  *
  * Caller must hold socket lock, but not port lock.
  *
  * Returns 0
  */
-static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
+static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
 {
         int rc;
         u32 onode;
         struct tipc_sock *tsk = tipc_sk(sk);
-        uint truesize = buf->truesize;
-        rc = filter_rcv(sk, buf);
+        uint truesize = skb->truesize;
+        rc = filter_rcv(sk, skb);
         if (likely(!rc)) {
                 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
@@ -1750,25 +1755,25 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
                 return 0;
         }
-        if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc))
+        if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
                 return 0;
-        tipc_link_xmit(buf, onode, 0);
+        tipc_link_xmit_skb(skb, onode, 0);
         return 0;
 }
 /**
  * tipc_sk_rcv - handle incoming message
- * @buf: buffer containing arriving message
+ * @skb: buffer containing arriving message
  * Consumes buffer
  * Returns 0 if success, or errno: -EHOSTUNREACH
  */
-int tipc_sk_rcv(struct sk_buff *buf)
+int tipc_sk_rcv(struct sk_buff *skb)
 {
         struct tipc_sock *tsk;
         struct sock *sk;
-        u32 dport = msg_destport(buf_msg(buf));
+        u32 dport = msg_destport(buf_msg(skb));
         int rc = TIPC_OK;
         uint limit;
         u32 dnode;
@@ -1776,7 +1781,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
         /* Validate destination and message */
         tsk = tipc_sk_get(dport);
         if (unlikely(!tsk)) {
-                rc = tipc_msg_eval(buf, &dnode);
+                rc = tipc_msg_eval(skb, &dnode);
                 goto exit;
         }
         sk = &tsk->sk;
@@ -1785,12 +1790,12 @@ int tipc_sk_rcv(struct sk_buff *buf)
         spin_lock_bh(&sk->sk_lock.slock);
         if (!sock_owned_by_user(sk)) {
-                rc = filter_rcv(sk, buf);
+                rc = filter_rcv(sk, skb);
         } else {
                 if (sk->sk_backlog.len == 0)
                         atomic_set(&tsk->dupl_rcvcnt, 0);
-                limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
-                if (sk_add_backlog(sk, buf, limit))
+                limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
+                if (sk_add_backlog(sk, skb, limit))
                         rc = -TIPC_ERR_OVERLOAD;
         }
         spin_unlock_bh(&sk->sk_lock.slock);
@@ -1798,10 +1803,10 @@ int tipc_sk_rcv(struct sk_buff *buf)
         if (likely(!rc))
                 return 0;
 exit:
-        if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc))
+        if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
                 return -EHOSTUNREACH;
-        tipc_link_xmit(buf, dnode, 0);
+        tipc_link_xmit_skb(skb, dnode, 0);
         return (rc < 0) ? -EHOSTUNREACH : 0;
 }
@@ -2059,7 +2064,7 @@ static int tipc_shutdown(struct socket *sock, int how)
 {
         struct sock *sk = sock->sk;
         struct tipc_sock *tsk = tipc_sk(sk);
-        struct sk_buff *buf;
+        struct sk_buff *skb;
         u32 dnode;
         int res;
@@ -2074,23 +2079,23 @@ static int tipc_shutdown(struct socket *sock, int how)
 restart:
         /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
-        buf = __skb_dequeue(&sk->sk_receive_queue);
-        if (buf) {
-                if (TIPC_SKB_CB(buf)->handle != NULL) {
-                        kfree_skb(buf);
+        skb = __skb_dequeue(&sk->sk_receive_queue);
+        if (skb) {
+                if (TIPC_SKB_CB(skb)->handle != NULL) {
+                        kfree_skb(skb);
                         goto restart;
                 }
-                if (tipc_msg_reverse(buf, &dnode, TIPC_CONN_SHUTDOWN))
-                        tipc_link_xmit(buf, dnode, tsk->ref);
+                if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
+                        tipc_link_xmit_skb(skb, dnode, tsk->ref);
                 tipc_node_remove_conn(dnode, tsk->ref);
         } else {
                 dnode = tsk_peer_node(tsk);
-                buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
+                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
                                       TIPC_CONN_MSG, SHORT_H_SIZE,
                                       0, dnode, tipc_own_addr,
                                       tsk_peer_port(tsk),
                                       tsk->ref, TIPC_CONN_SHUTDOWN);
-                tipc_link_xmit(buf, dnode, tsk->ref);
+                tipc_link_xmit_skb(skb, dnode, tsk->ref);
         }
         tsk->connected = 0;
         sock->state = SS_DISCONNECTING;
@@ -2119,7 +2124,7 @@ static void tipc_sk_timeout(unsigned long ref)
 {
         struct tipc_sock *tsk;
         struct sock *sk;
-        struct sk_buff *buf = NULL;
+        struct sk_buff *skb = NULL;
         u32 peer_port, peer_node;
         tsk = tipc_sk_get(ref);
@@ -2137,20 +2142,20 @@ static void tipc_sk_timeout(unsigned long ref)
         if (tsk->probing_state == TIPC_CONN_PROBING) {
                 /* Previous probe not answered -> self abort */
-                buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
+                skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
                                       SHORT_H_SIZE, 0, tipc_own_addr,
                                       peer_node, ref, peer_port,
                                       TIPC_ERR_NO_PORT);
         } else {
-                buf = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
+                skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
                                       0, peer_node, tipc_own_addr,
                                       peer_port, ref, TIPC_OK);
                 tsk->probing_state = TIPC_CONN_PROBING;
                 k_start_timer(&tsk->timer, tsk->probing_interval);
         }
         bh_unlock_sock(sk);
-        if (buf)
-                tipc_link_xmit(buf, peer_node, ref);
+        if (skb)
+                tipc_link_xmit_skb(skb, peer_node, ref);
 exit:
         tipc_sk_put(tsk);
 }
...