Commit 3a691015 authored by David S. Miller

Merge tag 'rxrpc-rewrite-20160823-1' of git://git.kernel.org/pub/scm/linux/kernel/git/dhowells/linux-fs

David Howells says:

====================
rxrpc: Cleanups

Here are some cleanups for the AF_RXRPC rewrite:

 (1) Remove some unused bits.

 (2) Call releasing on socket closure is now done in the order in which
     calls progress through the phases, so that we don't miss a call that
     is actively moving between lists.

 (3) The rxrpc_call struct's channel number field is redundant and replaced
     with accesses to the masked off cid field instead.

 (4) Use a tracepoint for socket buffer accounting rather than printks.

     Unfortunately, since this would require currently non-existent
     arch-specific help to divine the current instruction location, the
     accounting functions are moved out of line so that
     __builtin_return_address() can be used.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
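
To make point (3) concrete, here is a minimal illustrative sketch, not taken from the patch: the channel index lives in the low bits of the connection ID, so masking the cid recovers it and the separate channel field can be dropped. The helper name rxrpc_channel_of() is hypothetical, and RXRPC_CHANNELMASK is assumed to be 3 (four call channels per connection), matching the cid & RXRPC_CHANNELMASK expressions in the hunks below.

#include <stdint.h>

/* Sketch only: derive the channel index from the cid instead of storing
 * it separately.  RXRPC_CHANNELMASK is assumed to be 3 here. */
#define RXRPC_CHANNELMASK 3

static inline unsigned int rxrpc_channel_of(uint32_t cid)
{
        return cid & RXRPC_CHANNELMASK; /* low bits select the channel */
}
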
/* AF_RXRPC tracepoints
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM rxrpc

#if !defined(_TRACE_RXRPC_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_RXRPC_H

#include <linux/tracepoint.h>

TRACE_EVENT(rxrpc_skb,
            TP_PROTO(struct sk_buff *skb, int op, int usage, int mod_count,
                     const void *where),

            TP_ARGS(skb, op, usage, mod_count, where),

            TP_STRUCT__entry(
                    __field(struct sk_buff *,   skb             )
                    __field(int,                op              )
                    __field(int,                usage           )
                    __field(int,                mod_count       )
                    __field(const void *,       where           )
                             ),

            TP_fast_assign(
                    __entry->skb = skb;
                    __entry->op = op;
                    __entry->usage = usage;
                    __entry->mod_count = mod_count;
                    __entry->where = where;
                           ),

            TP_printk("s=%p %s u=%d m=%d p=%pSR",
                      __entry->skb,
                      (__entry->op == 0 ? "NEW" :
                       __entry->op == 1 ? "SEE" :
                       __entry->op == 2 ? "GET" :
                       __entry->op == 3 ? "FRE" :
                       "PUR"),
                      __entry->usage,
                      __entry->mod_count,
                      __entry->where)
            );

#endif /* _TRACE_RXRPC_H */

/* This part must be outside protection */
#include <trace/define_trace.h>
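
For orientation only, this header follows the standard kernel tracepoint pattern (nothing beyond what the patch itself does): exactly one compilation unit defines CREATE_TRACE_POINTS before including the header so that trace/define_trace.h expands the TRACE_EVENT() above into the real trace_rxrpc_skb() implementation, while every other user includes the header purely for the declaration. Sketched with the includes this series actually uses:

/* Done once, in af_rxrpc.c (see the next hunk); ar-internal.h pulls in
 * <trace/events/rxrpc.h>, so this expands the tracepoint bodies. */
#define CREATE_TRACE_POINTS
#include "ar-internal.h"

/* All other callers just include ar-internal.h and get the declaration,
 * e.g. the out-of-line skb helpers added to skbuff.c at the end of
 * this diff. */

Once built, the event should appear under events/rxrpc/rxrpc_skb in tracefs, since TRACE_SYSTEM is rxrpc.
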
@@ -22,6 +22,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#define CREATE_TRACE_POINTS
#include "ar-internal.h"
MODULE_DESCRIPTION("RxRPC network protocol");
@@ -341,10 +341,10 @@ enum rxrpc_call_flag {
RXRPC_CALL_RCVD_LAST, /* all packets received */
RXRPC_CALL_RUN_RTIMER, /* Tx resend timer started */
RXRPC_CALL_TX_SOFT_ACK, /* sent some soft ACKs */
RXRPC_CALL_PROC_BUSY, /* the processor is busy */
RXRPC_CALL_INIT_ACCEPT, /* acceptance was initiated */
RXRPC_CALL_HAS_USERID, /* has a user ID attached */
RXRPC_CALL_EXPECT_OOS, /* expect out of sequence packets */
RXRPC_CALL_IS_SERVICE, /* Call is service call */
};
/*
@@ -432,8 +432,10 @@ struct rxrpc_call {
int error_report; /* Network error (ICMP/local transport) */
int error; /* Local error incurred */
enum rxrpc_call_state state : 8; /* current state of call */
u16 service_id; /* service ID */
u32 call_id; /* call ID on connection */
u32 cid; /* connection ID plus channel index */
int debug_id; /* debug ID for printks */
u8 channel; /* connection channel occupied by this call */
/* transmission-phase ACK management */
u8 acks_head; /* offset into window of first entry */
@@ -461,13 +463,6 @@ struct rxrpc_call {
/* received packet records, 1 bit per record */
#define RXRPC_ACKR_WINDOW_ASZ DIV_ROUND_UP(RXRPC_MAXACKS, BITS_PER_LONG)
unsigned long ackr_window[RXRPC_ACKR_WINDOW_ASZ + 1];
u8 in_clientflag; /* Copy of conn->in_clientflag */
struct rxrpc_local *local; /* Local endpoint. */
u32 call_id; /* call ID on connection */
u32 cid; /* connection ID plus channel index */
u32 epoch; /* epoch of this connection */
u16 service_id; /* service ID */
};
/*
@@ -484,6 +479,8 @@ static inline void rxrpc_abort_call(struct rxrpc_call *call, u32 abort_code)
write_unlock_bh(&call->state_lock);
}
#include <trace/events/rxrpc.h>
/*
* af_rxrpc.c
*/
@@ -528,6 +525,16 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
void __rxrpc_put_call(struct rxrpc_call *);
void __exit rxrpc_destroy_all_calls(void);
static inline bool rxrpc_is_service_call(const struct rxrpc_call *call)
{
        return test_bit(RXRPC_CALL_IS_SERVICE, &call->flags);
}

static inline bool rxrpc_is_client_call(const struct rxrpc_call *call)
{
        return !rxrpc_is_service_call(call);
}
/*
* conn_client.c
*/
@@ -747,6 +754,11 @@ int rxrpc_init_server_conn_security(struct rxrpc_connection *);
* skbuff.c
*/
void rxrpc_packet_destructor(struct sk_buff *);
void rxrpc_new_skb(struct sk_buff *);
void rxrpc_see_skb(struct sk_buff *);
void rxrpc_get_skb(struct sk_buff *);
void rxrpc_free_skb(struct sk_buff *);
void rxrpc_purge_queue(struct sk_buff_head *);
/*
* sysctl.c
@@ -894,44 +906,6 @@ do { \
#endif /* __KDEBUGALL */
/*
 * socket buffer accounting / leak finding
 */
static inline void __rxrpc_new_skb(struct sk_buff *skb, const char *fn)
{
        //_net("new skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
        //atomic_inc(&rxrpc_n_skbs);
}

#define rxrpc_new_skb(skb) __rxrpc_new_skb((skb), __func__)

static inline void __rxrpc_kill_skb(struct sk_buff *skb, const char *fn)
{
        //_net("kill skb %p %s [%d]", skb, fn, atomic_read(&rxrpc_n_skbs));
        //atomic_dec(&rxrpc_n_skbs);
}

#define rxrpc_kill_skb(skb) __rxrpc_kill_skb((skb), __func__)

static inline void __rxrpc_free_skb(struct sk_buff *skb, const char *fn)
{
        if (skb) {
                CHECK_SLAB_OKAY(&skb->users);
                //_net("free skb %p %s [%d]",
                //     skb, fn, atomic_read(&rxrpc_n_skbs));
                //atomic_dec(&rxrpc_n_skbs);
                kfree_skb(skb);
        }
}

#define rxrpc_free_skb(skb) __rxrpc_free_skb((skb), __func__)

static inline void rxrpc_purge_queue(struct sk_buff_head *list)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue((list))) != NULL)
                rxrpc_free_skb(skb);
}
#define rxrpc_get_call(CALL) \
do { \
@@ -203,6 +203,7 @@ void rxrpc_accept_incoming_calls(struct rxrpc_local *local)
_net("incoming call skb %p", skb);
rxrpc_see_skb(skb);
sp = rxrpc_skb(skb);
/* Set up a response packet header in case we need it */
@@ -407,6 +407,7 @@ static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
skb = skb_dequeue(&call->rx_oos_queue);
if (skb) {
rxrpc_see_skb(skb);
sp = rxrpc_skb(skb);
_debug("drain OOS packet %d [%d]",
@@ -427,6 +428,7 @@ static int rxrpc_drain_rx_oos_queue(struct rxrpc_call *call)
/* find out what the next packet is */
skb = skb_peek(&call->rx_oos_queue);
rxrpc_see_skb(skb);
if (skb)
call->rx_first_oos = rxrpc_skb(skb)->hdr.seq;
else
@@ -576,6 +578,7 @@ static int rxrpc_process_rx_queue(struct rxrpc_call *call,
if (!skb)
return -EAGAIN;
rxrpc_see_skb(skb);
_net("deferred skb %p", skb);
sp = rxrpc_skb(skb);
@@ -832,11 +835,6 @@ void rxrpc_process_call(struct work_struct *work)
call->debug_id, rxrpc_call_states[call->state], call->events,
(jiffies - call->creation_jif) / (HZ / 10));
if (test_and_set_bit(RXRPC_CALL_PROC_BUSY, &call->flags)) {
_debug("XXXXXXXXXXXXX RUNNING ON MULTIPLE CPUS XXXXXXXXXXXXX");
return;
}
if (!call->conn)
goto skip_msg_init;
@@ -1281,7 +1279,6 @@ void rxrpc_process_call(struct work_struct *work)
}
error:
clear_bit(RXRPC_CALL_PROC_BUSY, &call->flags);
kfree(acks);
/* because we don't want two CPUs both processing the work item for one
@@ -167,10 +167,7 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
sock_hold(&rx->sk);
call->socket = rx;
call->rx_data_post = 1;
call->local = rx->local;
call->service_id = srx->srx_service;
call->in_clientflag = 0;
_leave(" = %p", call);
return call;
@@ -320,9 +317,9 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
candidate->conn = conn;
candidate->cid = sp->hdr.cid;
candidate->call_id = sp->hdr.callNumber;
candidate->channel = chan;
candidate->rx_data_post = 0;
candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
candidate->flags |= (1 << RXRPC_CALL_IS_SERVICE);
if (conn->security_ix > 0)
candidate->state = RXRPC_CALL_SERVER_SECURING;
@@ -332,7 +329,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
call = rcu_dereference_protected(conn->channels[chan].call,
lockdep_is_held(&conn->channel_lock));
_debug("channel[%u] is %p", candidate->channel, call);
_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
if (call && call->call_id == sp->hdr.callNumber) {
/* already set; must've been a duplicate packet */
_debug("extant call [%d]", call->state);
@@ -397,10 +394,7 @@ struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
list_add_tail(&call->link, &rxrpc_calls);
write_unlock_bh(&rxrpc_call_lock);
call->local = conn->params.local;
call->epoch = conn->proto.epoch;
call->service_id = conn->params.service_id;
call->in_clientflag = RXRPC_CLIENT_INITIATED;
_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);
@@ -569,12 +563,6 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
read_lock_bh(&rx->call_lock);
/* mark all the calls as no longer wanting incoming packets */
for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
call = rb_entry(p, struct rxrpc_call, sock_node);
rxrpc_mark_call_released(call);
}
/* kill the not-yet-accepted incoming calls */
list_for_each_entry(call, &rx->secureq, accept_link) {
rxrpc_mark_call_released(call);
@@ -584,6 +572,12 @@ void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
rxrpc_mark_call_released(call);
}
/* mark all the calls as no longer wanting incoming packets */
for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
call = rb_entry(p, struct rxrpc_call, sock_node);
rxrpc_mark_call_released(call);
}
read_unlock_bh(&rx->call_lock);
_leave("");
}
@@ -682,8 +676,8 @@ static void rxrpc_destroy_call(struct work_struct *work)
struct rxrpc_call *call =
container_of(work, struct rxrpc_call, destroyer);
_enter("%p{%d,%d,%p}",
call, atomic_read(&call->usage), call->channel, call->conn);
_enter("%p{%d,%x,%p}",
call, atomic_read(&call->usage), call->cid, call->conn);
ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
@@ -280,8 +280,6 @@ int rxrpc_connect_call(struct rxrpc_call *call,
found_channel:
_debug("found chan");
call->conn = conn;
call->channel = chan;
call->epoch = conn->proto.epoch;
call->cid = conn->proto.cid | chan;
call->call_id = ++conn->channels[chan].call_counter;
conn->channels[chan].call_id = call->call_id;
@@ -277,6 +277,7 @@ void rxrpc_process_connection(struct work_struct *work)
/* go through the conn-level event packets, releasing the ref on this
* connection that each one has when we've finished with it */
while ((skb = skb_dequeue(&conn->rx_queue))) {
rxrpc_see_skb(skb);
ret = rxrpc_process_event(conn, skb, &abort_code);
switch (ret) {
case -EPROTO:
@@ -365,6 +366,7 @@ void rxrpc_reject_packets(struct rxrpc_local *local)
whdr.type = RXRPC_PACKET_TYPE_ABORT;
while ((skb = skb_dequeue(&local->reject_queue))) {
rxrpc_see_skb(skb);
sp = rxrpc_skb(skb);
switch (sa.sa.sa_family) {
case AF_INET:
@@ -156,9 +156,10 @@ struct rxrpc_connection *rxrpc_find_connection_rcu(struct rxrpc_local *local,
void __rxrpc_disconnect_call(struct rxrpc_call *call)
{
struct rxrpc_connection *conn = call->conn;
struct rxrpc_channel *chan = &conn->channels[call->channel];
struct rxrpc_channel *chan =
&conn->channels[call->cid & RXRPC_CHANNELMASK];
_enter("%d,%d", conn->debug_id, call->channel);
_enter("%d,%x", conn->debug_id, call->cid);
if (rcu_access_pointer(chan->call) == call) {
/* Save the result of the call so that we can repeat it if necessary
@@ -93,6 +93,7 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
if (skb) {
struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
rxrpc_see_skb(skb);
_debug("{%d},{%u}", local->debug_id, sp->hdr.type);
switch (sp->hdr.type) {
@@ -218,11 +218,11 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
ret = 0;
} else if (cmd != RXRPC_CMD_SEND_DATA) {
ret = -EINVAL;
} else if (!call->in_clientflag &&
} else if (rxrpc_is_client_call(call) &&
call->state != RXRPC_CALL_CLIENT_SEND_REQUEST) {
/* request phase complete for this client call */
ret = -EPROTO;
} else if (call->in_clientflag &&
} else if (rxrpc_is_service_call(call) &&
call->state != RXRPC_CALL_SERVER_ACK_REQUEST &&
call->state != RXRPC_CALL_SERVER_SEND_REPLY) {
/* Reply phase not begun or not complete for service call. */
@@ -548,6 +548,7 @@ static int rxrpc_send_data(struct rxrpc_sock *rx,
skb = call->tx_pending;
call->tx_pending = NULL;
rxrpc_see_skb(skb);
copied = 0;
do {
@@ -61,8 +61,8 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call = list_entry(v, struct rxrpc_call, link);
sprintf(lbuff, "%pI4:%u",
&call->local->srx.transport.sin.sin_addr,
ntohs(call->local->srx.transport.sin.sin_port));
&call->socket->local->srx.transport.sin.sin_addr,
ntohs(call->socket->local->srx.transport.sin.sin_port));
conn = call->conn;
if (conn)
@@ -80,7 +80,7 @@ static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
call->service_id,
call->cid,
call->call_id,
call->in_clientflag ? "Svc" : "Clt",
rxrpc_is_service_call(call) ? "Svc" : "Clt",
atomic_read(&call->usage),
rxrpc_call_states[call->state],
call->remote_abort ?: call->local_abort,
@@ -111,6 +111,7 @@ int rxrpc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
}
peek_next_packet:
rxrpc_see_skb(skb);
sp = rxrpc_skb(skb);
call = sp->call;
ASSERT(call != NULL);
@@ -275,7 +275,7 @@ static int rxkad_secure_packet(struct rxrpc_call *call,
memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
/* calculate the security checksum */
x = call->channel << (32 - RXRPC_CIDSHIFT);
x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
x |= sp->hdr.seq & 0x3fffffff;
call->crypto_buf[0] = htonl(sp->hdr.callNumber);
call->crypto_buf[1] = htonl(x);
@@ -507,7 +507,7 @@ static int rxkad_verify_packet(struct rxrpc_call *call,
memcpy(&iv, call->conn->csum_iv.x, sizeof(iv));
/* validate the security checksum */
x = call->channel << (32 - RXRPC_CIDSHIFT);
x = (call->cid & RXRPC_CHANNELMASK) << (32 - RXRPC_CIDSHIFT);
x |= sp->hdr.seq & 0x3fffffff;
call->crypto_buf[0] = htonl(call->call_id);
call->crypto_buf[1] = htonl(x);
@@ -163,3 +163,65 @@ void rxrpc_kernel_free_skb(struct sk_buff *skb)
        rxrpc_free_skb(skb);
}
EXPORT_SYMBOL(rxrpc_kernel_free_skb);

/*
 * Note the existence of a new-to-us socket buffer (allocated or dequeued).
 */
void rxrpc_new_skb(struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&rxrpc_n_skbs);

        trace_rxrpc_skb(skb, 0, atomic_read(&skb->users), n, here);
}

/*
 * Note the re-emergence of a socket buffer from a queue or buffer.
 */
void rxrpc_see_skb(struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);

        if (skb) {
                int n = atomic_read(&rxrpc_n_skbs);
                trace_rxrpc_skb(skb, 1, atomic_read(&skb->users), n, here);
        }
}

/*
 * Note the addition of a ref on a socket buffer.
 */
void rxrpc_get_skb(struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);
        int n = atomic_inc_return(&rxrpc_n_skbs);

        trace_rxrpc_skb(skb, 2, atomic_read(&skb->users), n, here);
        skb_get(skb);
}

/*
 * Note the destruction of a socket buffer.
 */
void rxrpc_free_skb(struct sk_buff *skb)
{
        const void *here = __builtin_return_address(0);

        if (skb) {
                int n;
                CHECK_SLAB_OKAY(&skb->users);
                n = atomic_dec_return(&rxrpc_n_skbs);
                trace_rxrpc_skb(skb, 3, atomic_read(&skb->users), n, here);
                kfree_skb(skb);
        }
}

/*
 * Clear a queue of socket buffers.
 */
void rxrpc_purge_queue(struct sk_buff_head *list)
{
        const void *here = __builtin_return_address(0);
        struct sk_buff *skb;

        while ((skb = skb_dequeue((list))) != NULL) {
                int n = atomic_dec_return(&rxrpc_n_skbs);
                trace_rxrpc_skb(skb, 4, atomic_read(&skb->users), n, here);
                kfree_skb(skb);
        }
}
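
A hedged usage sketch (the function and queue names are invented for illustration, not part of the patch): callers route every skb lifetime event through these wrappers so that the rxrpc_skb tracepoint records the operation, the skb refcount, the module-wide count and the caller's return address.

static void example_drain_queue(struct sk_buff_head *queue)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(queue)) != NULL) {
                rxrpc_see_skb(skb);     /* op 1 "SEE": skb re-emerged from a queue */
                /* ... process the packet here ... */
                rxrpc_free_skb(skb);    /* op 3 "FRE": drop the reference and free */
        }
}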