diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 8ba8d76e856ad60e04845154e0e0301b818db6c4..0383e5e9a0f30323b7586f94cf0c6c3dc030d6d8 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -280,11 +280,12 @@ TRACE_EVENT(rxrpc_tx_data,
 		    __entry->lose = lose;
 			   ),
 
-	    TP_printk("c=%p DATA %08x q=%08x fl=%02x%s",
+	    TP_printk("c=%p DATA %08x q=%08x fl=%02x%s%s",
 		      __entry->call,
 		      __entry->serial,
 		      __entry->seq,
 		      __entry->flags,
+		      __entry->retrans ? " *RETRANS*" : "",
 		      __entry->lose ? " *LOSE*" : "")
 	    );
 
@@ -452,17 +453,18 @@ TRACE_EVENT(rxrpc_rtt_rx,
 
 TRACE_EVENT(rxrpc_timer,
 	    TP_PROTO(struct rxrpc_call *call, enum rxrpc_timer_trace why,
-		     unsigned long now),
+		     ktime_t now, unsigned long now_j),
 
-	    TP_ARGS(call, why, now),
+	    TP_ARGS(call, why, now, now_j),
 
 	    TP_STRUCT__entry(
 		    __field(struct rxrpc_call *,		call		)
 		    __field(enum rxrpc_timer_trace,		why		)
-		    __field(unsigned long,			now		)
-		    __field(unsigned long,			expire_at	)
-		    __field(unsigned long,			ack_at		)
-		    __field(unsigned long,			resend_at	)
+		    __field_struct(ktime_t,			now		)
+		    __field_struct(ktime_t,			expire_at	)
+		    __field_struct(ktime_t,			ack_at		)
+		    __field_struct(ktime_t,			resend_at	)
+		    __field(unsigned long,			now_j		)
 		    __field(unsigned long,			timer		)
 			     ),
 
@@ -473,17 +475,17 @@ TRACE_EVENT(rxrpc_timer,
 		    __entry->expire_at	= call->expire_at;
 		    __entry->ack_at	= call->ack_at;
 		    __entry->resend_at	= call->resend_at;
+		    __entry->now_j	= now_j;
 		    __entry->timer	= call->timer.expires;
 			   ),
 
-	    TP_printk("c=%p %s now=%lx x=%ld a=%ld r=%ld t=%ld",
+	    TP_printk("c=%p %s x=%lld a=%lld r=%lld t=%ld",
 		      __entry->call,
 		      rxrpc_timer_traces[__entry->why],
-		      __entry->now,
-		      __entry->expire_at - __entry->now,
-		      __entry->ack_at - __entry->now,
-		      __entry->resend_at - __entry->now,
-		      __entry->timer - __entry->now)
+		      ktime_to_ns(ktime_sub(__entry->expire_at, __entry->now)),
+		      ktime_to_ns(ktime_sub(__entry->ack_at, __entry->now)),
+		      ktime_to_ns(ktime_sub(__entry->resend_at, __entry->now)),
+		      __entry->timer - __entry->now_j)
 	    );
 
 TRACE_EVENT(rxrpc_rx_lose,
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 539db54697f9fe08071d6c8c08ce414df20c8eb6..d38dffd7808536a510a523ad5d3d3032abc4c178 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -144,9 +144,7 @@ struct rxrpc_skb_priv {
 		u8		nr_jumbo;	/* Number of jumbo subpackets */
 	};
 	union {
-		unsigned int	offset;		/* offset into buffer of next read */
 		int		remain;		/* amount of space remaining for next write */
-		u32		error;		/* network error code */
 	};
 
 	struct rxrpc_host_header hdr;		/* RxRPC packet header from this packet */
@@ -466,9 +464,9 @@ struct rxrpc_call {
 	struct rxrpc_connection	*conn;		/* connection carrying call */
 	struct rxrpc_peer	*peer;		/* Peer record for remote address */
 	struct rxrpc_sock __rcu	*socket;	/* socket responsible */
-	unsigned long		ack_at;		/* When deferred ACK needs to happen */
-	unsigned long		resend_at;	/* When next resend needs to happen */
-	unsigned long		expire_at;	/* When the call times out */
+	ktime_t			ack_at;		/* When deferred ACK needs to happen */
+	ktime_t			resend_at;	/* When next resend needs to happen */
+	ktime_t			expire_at;	/* When the call times out */
 	struct timer_list	timer;		/* Combined event timer */
 	struct work_struct	processor;	/* Event processor */
 	rxrpc_notify_rx_t	notify_rx;	/* kernel service Rx notification function */
@@ -807,7 +805,7 @@ int rxrpc_reject_call(struct rxrpc_sock *);
 /*
  * call_event.c
  */
-void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace);
+void rxrpc_set_timer(struct rxrpc_call *, enum rxrpc_timer_trace, ktime_t);
 void rxrpc_propose_ACK(struct rxrpc_call *, u8, u16, u32, bool, bool,
 		       enum rxrpc_propose_ack_trace);
 void rxrpc_process_call(struct work_struct *);
diff --git a/net/rxrpc/call_event.c b/net/rxrpc/call_event.c
index 1f6c7633b96480cfc619eabc26ba37036852c4ba..4f00476630b93587cc83a1894fbe09337c0e2712 100644
--- a/net/rxrpc/call_event.c
+++ b/net/rxrpc/call_event.c
@@ -24,29 +24,53 @@
 /*
  * Set the timer
  */
-void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why)
+void rxrpc_set_timer(struct rxrpc_call *call, enum rxrpc_timer_trace why,
+		     ktime_t now)
 {
-	unsigned long t, now = jiffies;
+	unsigned long t_j, now_j = jiffies;
+	ktime_t t;
+	bool queue = false;
 
 	read_lock_bh(&call->state_lock);
 
 	if (call->state < RXRPC_CALL_COMPLETE) {
 		t = call->expire_at;
-		if (time_before_eq(t, now))
+		if (!ktime_after(t, now))
 			goto out;
 
-		if (time_after(call->resend_at, now) &&
-		    time_before(call->resend_at, t))
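+		/* A soft timeout (resend or ACK) that has already passed is
+		 * turned into its event and parked at the hard expiry time;
+		 * the call is queued below to process it.
+		 */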
+		if (!ktime_after(call->resend_at, now)) {
+			call->resend_at = call->expire_at;
+			if (!test_and_set_bit(RXRPC_CALL_EV_RESEND, &call->events))
+				queue = true;
+		} else if (ktime_before(call->resend_at, t)) {
 			t = call->resend_at;
+		}
 
-		if (time_after(call->ack_at, now) &&
-		    time_before(call->ack_at, t))
+		if (!ktime_after(call->ack_at, now)) {
+			call->ack_at = call->expire_at;
+			if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
+				queue = true;
+		} else if (ktime_before(call->ack_at, t)) {
 			t = call->ack_at;
+		}
 
-		if (call->timer.expires != t || !timer_pending(&call->timer)) {
-			mod_timer(&call->timer, t);
-			trace_rxrpc_timer(call, why, now);
+		t_j = nsecs_to_jiffies(ktime_to_ns(ktime_sub(t, now)));
+		t_j += jiffies;
+
+		/* We have to make sure that the calculated jiffies value falls
+		 * at or after the nsec value, or we may loop ceaselessly
+		 * because the timer times out, but we haven't reached the nsec
+		 * timeout yet.
+		 */
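+		/* (e.g. with HZ=100 a 5ms delta rounds down to 0 jiffies) */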
+		t_j++;
+
+		if (call->timer.expires != t_j || !timer_pending(&call->timer)) {
+			mod_timer(&call->timer, t_j);
+			trace_rxrpc_timer(call, why, now, now_j);
 		}
+
+		if (queue)
+			rxrpc_queue_call(call);
 	}
 
 out:
@@ -62,7 +86,8 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 				enum rxrpc_propose_ack_trace why)
 {
 	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
-	unsigned long now, ack_at, expiry = rxrpc_soft_ack_delay;
+	unsigned int expiry = rxrpc_soft_ack_delay;
+	ktime_t now, ack_at;
 	s8 prior = rxrpc_ack_priority[ack_reason];
 
 	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
@@ -111,7 +136,6 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 		break;
 	}
 
-	now = jiffies;
 	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
 		_debug("already scheduled");
 	} else if (immediate || expiry == 0) {
@@ -120,11 +144,11 @@ static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
 		    background)
 			rxrpc_queue_call(call);
 	} else {
-		ack_at = now + expiry;
-		_debug("deferred ACK %ld < %ld", expiry, call->ack_at - now);
-		if (time_before(ack_at, call->ack_at)) {
+		now = ktime_get_real();
+		ack_at = ktime_add_ms(now, expiry);
+		if (ktime_before(ack_at, call->ack_at)) {
 			call->ack_at = ack_at;
-			rxrpc_set_timer(call, rxrpc_timer_set_for_ack);
+			rxrpc_set_timer(call, rxrpc_timer_set_for_ack, now);
 		}
 	}
 
@@ -157,12 +181,12 @@ static void rxrpc_congestion_timeout(struct rxrpc_call *call)
 /*
  * Perform retransmission of NAK'd and unack'd packets.
  */
-static void rxrpc_resend(struct rxrpc_call *call)
+static void rxrpc_resend(struct rxrpc_call *call, ktime_t now)
 {
 	struct rxrpc_skb_priv *sp;
 	struct sk_buff *skb;
 	rxrpc_seq_t cursor, seq, top;
-	ktime_t now = ktime_get_real(), max_age, oldest, resend_at, ack_ts;
+	ktime_t max_age, oldest, ack_ts;
 	int ix;
 	u8 annotation, anno_type, retrans = 0, unacked = 0;
 
@@ -212,14 +236,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
 				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
 	}
 
-	resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
-	call->resend_at = jiffies +
-		nsecs_to_jiffies(ktime_to_ns(ktime_sub(resend_at, now))) +
-		1; /* We have to make sure that the calculated jiffies value
-		    * falls at or after the nsec value, or we shall loop
-		    * ceaselessly because the timer times out, but we haven't
-		    * reached the nsec timeout yet.
-		    */
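+	/* resend_at is kept as a ktime now; rxrpc_set_timer() converts it to
+	 * a jiffies deadline when the timer is armed.
+	 */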
+	call->resend_at = ktime_add_ms(oldest, rxrpc_resend_timeout);
 
 	if (unacked)
 		rxrpc_congestion_timeout(call);
@@ -229,7 +246,7 @@ static void rxrpc_resend(struct rxrpc_call *call)
 	 * retransmitting data.
 	 */
 	if (!retrans) {
-		rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
+		rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
 		spin_unlock_bh(&call->lock);
 		ack_ts = ktime_sub(now, call->acks_latest_ts);
 		if (ktime_to_ns(ack_ts) < call->peer->rtt)
@@ -301,7 +318,7 @@ void rxrpc_process_call(struct work_struct *work)
 {
 	struct rxrpc_call *call =
 		container_of(work, struct rxrpc_call, processor);
-	unsigned long now;
+	ktime_t now;
 
 	rxrpc_see_call(call);
 
@@ -320,15 +337,14 @@ void rxrpc_process_call(struct work_struct *work)
 		goto out_put;
 	}
 
-	now = jiffies;
-	if (time_after_eq(now, call->expire_at)) {
+	now = ktime_get_real();
+	if (ktime_before(call->expire_at, now)) {
 		rxrpc_abort_call("EXP", call, 0, RX_CALL_TIMEOUT, ETIME);
 		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
 		goto recheck_state;
 	}
 
-	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
-	    time_after_eq(now, call->ack_at)) {
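+	/* Expiry of ack_at/resend_at is now signalled by rxrpc_set_timer()
+	 * setting the event bits, so only the bits need checking here.
+	 */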
+	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events)) {
 		call->ack_at = call->expire_at;
 		if (call->ackr_reason) {
 			rxrpc_send_call_packet(call, RXRPC_PACKET_TYPE_ACK);
@@ -336,13 +352,12 @@ void rxrpc_process_call(struct work_struct *work)
 		}
 	}
 
-	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events) ||
-	    time_after_eq(now, call->resend_at)) {
-		rxrpc_resend(call);
+	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
+		rxrpc_resend(call, now);
 		goto recheck_state;
 	}
 
-	rxrpc_set_timer(call, rxrpc_timer_set_for_resend);
+	rxrpc_set_timer(call, rxrpc_timer_set_for_resend, now);
 
 	/* other events may have been raised since we started checking */
 	if (call->events && call->state < RXRPC_CALL_COMPLETE) {
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index d4b3293b78faf22704d7c0e9104dc0dbc6027887..364b42dc3dce5360f79e8a26b2d5829361ba20aa 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -19,11 +19,6 @@
 #include <net/af_rxrpc.h>
 #include "ar-internal.h"
 
-/*
- * Maximum lifetime of a call (in jiffies).
- */
-unsigned int rxrpc_max_call_lifetime = 60 * HZ;
-
 const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
 	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
 	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
@@ -76,10 +71,8 @@ static void rxrpc_call_timer_expired(unsigned long _call)
 
 	_enter("%d", call->debug_id);
 
-	if (call->state < RXRPC_CALL_COMPLETE) {
-		trace_rxrpc_timer(call, rxrpc_timer_expired, jiffies);
-		rxrpc_queue_call(call);
-	}
+	if (call->state < RXRPC_CALL_COMPLETE)
+		rxrpc_set_timer(call, rxrpc_timer_expired, ktime_get_real());
 }
 
 /*
@@ -207,14 +200,14 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct sockaddr_rxrpc *srx,
  */
 static void rxrpc_start_call_timer(struct rxrpc_call *call)
 {
-	unsigned long expire_at;
+	ktime_t now = ktime_get_real(), expire_at;
 
-	expire_at = jiffies + rxrpc_max_call_lifetime;
+	expire_at = ktime_add_ms(now, rxrpc_max_call_lifetime);
 	call->expire_at = expire_at;
 	call->ack_at = expire_at;
 	call->resend_at = expire_at;
-	call->timer.expires = expire_at + 1;
-	rxrpc_set_timer(call, rxrpc_timer_begin);
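+	/* Park the timer expiry far in the future; rxrpc_set_timer() derives
+	 * the real jiffies deadline from the ktime fields.
+	 */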
+	call->timer.expires = jiffies + LONG_MAX / 2;
+	rxrpc_set_timer(call, rxrpc_timer_begin, now);
 }
 
 /*
diff --git a/net/rxrpc/conn_event.c b/net/rxrpc/conn_event.c
index 37609ce89f52ee612b558b439ead5e31e8dca4f7..3f9d8d7ec6323a95de3e08d01098abdfcf33ff4f 100644
--- a/net/rxrpc/conn_event.c
+++ b/net/rxrpc/conn_event.c
@@ -276,7 +276,8 @@ static int rxrpc_process_event(struct rxrpc_connection *conn,
 		return 0;
 
 	case RXRPC_PACKET_TYPE_ABORT:
-		if (skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) < 0)
+		if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+				  &wtmp, sizeof(wtmp)) < 0)
 			return -EPROTO;
 		abort_code = ntohl(wtmp);
 		_proto("Rx ABORT %%%u { ac=%d }", sp->hdr.serial, abort_code);
diff --git a/net/rxrpc/input.c b/net/rxrpc/input.c
index 1461d30583c9bf5eaa938aa934323dc8db854bf4..3ad9f75031e34806a892139a7f1c9b7fb1dc16a7 100644
--- a/net/rxrpc/input.c
+++ b/net/rxrpc/input.c
@@ -57,7 +57,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
 		call->cong_ssthresh = max_t(unsigned int,
 					    summary->flight_size / 2, 2);
 		cwnd = 1;
-		if (cwnd > call->cong_ssthresh &&
+		if (cwnd >= call->cong_ssthresh &&
 		    call->cong_mode == RXRPC_CALL_SLOW_START) {
 			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
 			call->cong_tstamp = skb->tstamp;
@@ -82,7 +82,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
 			goto packet_loss_detected;
 		if (summary->cumulative_acks > 0)
 			cwnd += 1;
-		if (cwnd > call->cong_ssthresh) {
+		if (cwnd >= call->cong_ssthresh) {
 			call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
 			call->cong_tstamp = skb->tstamp;
 		}
@@ -161,7 +161,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
 	call->cong_dup_acks = 0;
 	call->cong_extra = 0;
 	call->cong_tstamp = skb->tstamp;
-	if (cwnd <= call->cong_ssthresh)
+	if (cwnd < call->cong_ssthresh)
 		call->cong_mode = RXRPC_CALL_SLOW_START;
 	else
 		call->cong_mode = RXRPC_CALL_CONGEST_AVOIDANCE;
@@ -328,7 +328,8 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 		call->resend_at = call->expire_at;
 		call->ack_at = call->expire_at;
 		spin_unlock_bh(&call->lock);
-		rxrpc_set_timer(call, rxrpc_timer_init_for_reply);
+		rxrpc_set_timer(call, rxrpc_timer_init_for_reply,
+				ktime_get_real());
 	}
 
 	if (!test_bit(RXRPC_CALL_TX_LAST, &call->flags))
@@ -358,7 +359,7 @@ static bool rxrpc_receiving_reply(struct rxrpc_call *call)
 static bool rxrpc_validate_jumbo(struct sk_buff *skb)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-	unsigned int offset = sp->offset;
+	unsigned int offset = sizeof(struct rxrpc_wire_header);
 	unsigned int len = skb->len;
 	int nr_jumbo = 1;
 	u8 flags = sp->hdr.flags;
@@ -419,7 +420,7 @@ static void rxrpc_input_data(struct rxrpc_call *call, struct sk_buff *skb,
 			     u16 skew)
 {
 	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-	unsigned int offset = sp->offset;
+	unsigned int offset = sizeof(struct rxrpc_wire_header);
 	unsigned int ix;
 	rxrpc_serial_t serial = sp->hdr.serial, ack_serial = 0;
 	rxrpc_seq_t seq = sp->hdr.seq, hard_ack;
@@ -658,6 +659,8 @@ static void rxrpc_input_ackinfo(struct rxrpc_call *call, struct sk_buff *skb,
 	if (rwind > RXRPC_RXTX_BUFF_SIZE - 1)
 		rwind = RXRPC_RXTX_BUFF_SIZE - 1;
 	call->tx_winsize = rwind;
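+	/* Don't let the slow-start threshold exceed the peer's advertised
+	 * receive window.
+	 */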
+	if (call->cong_ssthresh > rwind)
+		call->cong_ssthresh = rwind;
 
 	mtu = min(ntohl(ackinfo->rxMTU), ntohl(ackinfo->maxMTU));
 
@@ -744,15 +747,16 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 	} buf;
 	rxrpc_serial_t acked_serial;
 	rxrpc_seq_t first_soft_ack, hard_ack;
-	int nr_acks, offset;
+	int nr_acks, offset, ioffset;
 
 	_enter("");
 
-	if (skb_copy_bits(skb, sp->offset, &buf.ack, sizeof(buf.ack)) < 0) {
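+	/* With sp->offset removed, the ACK body begins immediately after the
+	 * wire header.
+	 */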
+	offset = sizeof(struct rxrpc_wire_header);
+	if (skb_copy_bits(skb, offset, &buf.ack, sizeof(buf.ack)) < 0) {
 		_debug("extraction failure");
 		return rxrpc_proto_abort("XAK", call, 0);
 	}
-	sp->offset += sizeof(buf.ack);
+	offset += sizeof(buf.ack);
 
 	acked_serial = ntohl(buf.ack.serial);
 	first_soft_ack = ntohl(buf.ack.firstPacket);
@@ -790,9 +794,9 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 				  rxrpc_propose_ack_respond_to_ack);
 	}
 
-	offset = sp->offset + nr_acks + 3;
-	if (skb->len >= offset + sizeof(buf.info)) {
-		if (skb_copy_bits(skb, offset, &buf.info, sizeof(buf.info)) < 0)
+	ioffset = offset + nr_acks + 3;
+	if (skb->len >= ioffset + sizeof(buf.info)) {
+		if (skb_copy_bits(skb, ioffset, &buf.info, sizeof(buf.info)) < 0)
 			return rxrpc_proto_abort("XAI", call, 0);
 		rxrpc_input_ackinfo(call, skb, &buf.info);
 	}
@@ -830,7 +834,7 @@ static void rxrpc_input_ack(struct rxrpc_call *call, struct sk_buff *skb,
 		rxrpc_rotate_tx_window(call, hard_ack, &summary);
 
 	if (nr_acks > 0) {
-		if (skb_copy_bits(skb, sp->offset, buf.acks, nr_acks) < 0)
+		if (skb_copy_bits(skb, offset, buf.acks, nr_acks) < 0)
 			return rxrpc_proto_abort("XSA", call, 0);
 		rxrpc_input_soft_acks(call, buf.acks, first_soft_ack, nr_acks,
 				      &summary);
@@ -878,7 +882,8 @@ static void rxrpc_input_abort(struct rxrpc_call *call, struct sk_buff *skb)
 	_enter("");
 
 	if (skb->len >= 4 &&
-	    skb_copy_bits(skb, sp->offset, &wtmp, sizeof(wtmp)) >= 0)
+	    skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+			  &wtmp, sizeof(wtmp)) >= 0)
 		abort_code = ntohl(wtmp);
 
 	_proto("Rx ABORT %%%u { %x }", sp->hdr.serial, abort_code);
@@ -994,7 +999,6 @@ int rxrpc_extract_header(struct rxrpc_skb_priv *sp, struct sk_buff *skb)
 	sp->hdr.securityIndex	= whdr.securityIndex;
 	sp->hdr._rsvd		= ntohs(whdr._rsvd);
 	sp->hdr.serviceId	= ntohs(whdr.serviceId);
-	sp->offset = sizeof(whdr);
 	return 0;
 }
 
diff --git a/net/rxrpc/local_event.c b/net/rxrpc/local_event.c
index 190f68bd9e270f2d64969f46d2368480bf3d3ddb..540d3955c1bcc11652879c7e80630978513cc16e 100644
--- a/net/rxrpc/local_event.c
+++ b/net/rxrpc/local_event.c
@@ -95,7 +95,8 @@ void rxrpc_process_local_events(struct rxrpc_local *local)
 
 		switch (sp->hdr.type) {
 		case RXRPC_PACKET_TYPE_VERSION:
-			if (skb_copy_bits(skb, sp->offset, &v, 1) < 0)
+			if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+					  &v, 1) < 0)
 				return;
 			_proto("Rx VERSION { %02x }", v);
 			if (v == 0)
diff --git a/net/rxrpc/misc.c b/net/rxrpc/misc.c
index 47dddacdbb91a9881daaee8694f5df003385f36d..9d1c721bc4e803d52c450aa640ca7eb675802c73 100644
--- a/net/rxrpc/misc.c
+++ b/net/rxrpc/misc.c
@@ -20,29 +20,34 @@
  */
 unsigned int rxrpc_max_backlog __read_mostly = 10;
 
+/*
+ * Maximum lifetime of a call (in ms).
+ */
+unsigned int rxrpc_max_call_lifetime = 60 * 1000;
+
 /*
  * How long to wait before scheduling ACK generation after seeing a
- * packet with RXRPC_REQUEST_ACK set (in jiffies).
+ * packet with RXRPC_REQUEST_ACK set (in ms).
  */
 unsigned int rxrpc_requested_ack_delay = 1;
 
 /*
- * How long to wait before scheduling an ACK with subtype DELAY (in jiffies).
+ * How long to wait before scheduling an ACK with subtype DELAY (in ms).
  *
  * We use this when we've received new data packets.  If those packets aren't
  * all consumed within this time we will send a DELAY ACK if an ACK was not
  * requested to let the sender know it doesn't need to resend.
  */
-unsigned int rxrpc_soft_ack_delay = 1 * HZ;
+unsigned int rxrpc_soft_ack_delay = 1 * 1000;
 
 /*
- * How long to wait before scheduling an ACK with subtype IDLE (in jiffies).
+ * How long to wait before scheduling an ACK with subtype IDLE (in ms).
  *
  * We use this when we've consumed some previously soft-ACK'd packets when
  * further packets aren't immediately received to decide when to send an IDLE
  * ACK let the other end know that it can free up its Tx buffer space.
  */
-unsigned int rxrpc_idle_ack_delay = 0.5 * HZ;
+unsigned int rxrpc_idle_ack_delay = 0.5 * 1000;
 
 /*
  * Receive window size in packets.  This indicates the maximum number of
diff --git a/net/rxrpc/recvmsg.c b/net/rxrpc/recvmsg.c
index 038ae62ddb4d638c3e83f774069122ced9ba89d9..f05ea0a8807673df0d7534a0f9a41020dbaa0a05 100644
--- a/net/rxrpc/recvmsg.c
+++ b/net/rxrpc/recvmsg.c
@@ -261,15 +261,13 @@ static int rxrpc_locate_data(struct rxrpc_call *call, struct sk_buff *skb,
 			     u8 *_annotation,
 			     unsigned int *_offset, unsigned int *_len)
 {
-	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
-	unsigned int offset = *_offset;
+	unsigned int offset = sizeof(struct rxrpc_wire_header);
 	unsigned int len = *_len;
 	int ret;
 	u8 annotation = *_annotation;
 
 	/* Locate the subpacket */
-	offset = sp->offset;
-	len = skb->len - sp->offset;
+	len = skb->len - offset;
 	if ((annotation & RXRPC_RX_ANNO_JUMBO) > 0) {
 		offset += (((annotation & RXRPC_RX_ANNO_JUMBO) - 1) *
 			   RXRPC_JUMBO_SUBPKTLEN);
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 88d080a1a3deed925941212ed70564236d604f93..627abed5f999f92f584912491b54ee5a5657d92e 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -771,7 +771,8 @@ static int rxkad_respond_to_challenge(struct rxrpc_connection *conn,
 	}
 
 	abort_code = RXKADPACKETSHORT;
-	if (skb_copy_bits(skb, sp->offset, &challenge, sizeof(challenge)) < 0)
+	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+			  &challenge, sizeof(challenge)) < 0)
 		goto protocol_error;
 
 	version = ntohl(challenge.version);
@@ -1028,7 +1029,8 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
 	_enter("{%d,%x}", conn->debug_id, key_serial(conn->server_key));
 
 	abort_code = RXKADPACKETSHORT;
-	if (skb_copy_bits(skb, sp->offset, &response, sizeof(response)) < 0)
+	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+			  &response, sizeof(response)) < 0)
 		goto protocol_error;
 	if (!pskb_pull(skb, sizeof(response)))
 		BUG();
@@ -1057,7 +1059,8 @@ static int rxkad_verify_response(struct rxrpc_connection *conn,
 		return -ENOMEM;
 
 	abort_code = RXKADPACKETSHORT;
-	if (skb_copy_bits(skb, sp->offset, ticket, ticket_len) < 0)
+	if (skb_copy_bits(skb, sizeof(struct rxrpc_wire_header),
+			  ticket, ticket_len) < 0)
 		goto protocol_error_free;
 
 	ret = rxkad_decrypt_ticket(conn, ticket, ticket_len, &session_key,
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index d8dfdce874d8b20e06fca9b7323fe668fecf6c6a..3322543d460ac69858a0afc5e261441f780a7a15 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -149,13 +149,13 @@ static void rxrpc_queue_packet(struct rxrpc_call *call, struct sk_buff *skb,
 		_debug("need instant resend %d", ret);
 		rxrpc_instant_resend(call, ix);
 	} else {
-		unsigned long resend_at;
+		ktime_t now = ktime_get_real(), resend_at;
 
-		resend_at = jiffies + msecs_to_jiffies(rxrpc_resend_timeout);
+		resend_at = ktime_add_ms(now, rxrpc_resend_timeout);
 
-		if (time_before(resend_at, call->resend_at)) {
+		if (ktime_before(resend_at, call->resend_at)) {
 			call->resend_at = resend_at;
-			rxrpc_set_timer(call, rxrpc_timer_set_for_send);
+			rxrpc_set_timer(call, rxrpc_timer_set_for_send, now);
 		}
 	}
 
diff --git a/net/rxrpc/sysctl.c b/net/rxrpc/sysctl.c
index 13d1df03ebac3a8ceba6da5b1b19cc3c349200f0..34c706d2f79c695076f228facc4b378ce37296cf 100644
--- a/net/rxrpc/sysctl.c
+++ b/net/rxrpc/sysctl.c
@@ -35,7 +35,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
 		.data		= &rxrpc_requested_ack_delay,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_ms_jiffies,
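+		/* The delay is a plain millisecond value now, so no
+		 * ms<->jiffies conversion is needed.
+		 */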
+		.proc_handler	= proc_dointvec,
 		.extra1		= (void *)&zero,
 	},
 	{
@@ -43,7 +43,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
 		.data		= &rxrpc_soft_ack_delay,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_ms_jiffies,
+		.proc_handler	= proc_dointvec,
 		.extra1		= (void *)&one,
 	},
 	{
@@ -51,7 +51,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
 		.data		= &rxrpc_idle_ack_delay,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_ms_jiffies,
+		.proc_handler	= proc_dointvec,
 		.extra1		= (void *)&one,
 	},
 	{
@@ -85,7 +85,7 @@ static struct ctl_table rxrpc_sysctl_table[] = {
 		.data		= &rxrpc_max_call_lifetime,
 		.maxlen		= sizeof(unsigned int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_jiffies,
+		.proc_handler	= proc_dointvec,
 		.extra1		= (void *)&one,
 	},