Commit 2c64605b authored by Pablo Neira Ayuso, committed by David S. Miller

net: Fix CONFIG_NET_CLS_ACT=n and CONFIG_NFT_FWD_NETDEV={y, m} build

    net/netfilter/nft_fwd_netdev.c: In function ‘nft_fwd_netdev_eval’:
    net/netfilter/nft_fwd_netdev.c:32:10: error: ‘struct sk_buff’ has no member named ‘tc_redirected’
      pkt->skb->tc_redirected = 1;
              ^~
    net/netfilter/nft_fwd_netdev.c:33:10: error: ‘struct sk_buff’ has no member named ‘tc_from_ingress’
      pkt->skb->tc_from_ingress = 1;
              ^~

To avoid a direct dependency on tc actions from netfilter, wrap the
redirect bits in CONFIG_NET_REDIRECT and move the helpers to
include/linux/skbuff.h. Turn this toggle on from the ifb driver, the
only existing client of these bits in the tree.
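
With the toggle compiled out, the helpers still exist but collapse to
constants, so callers need no #ifdef of their own. As an illustration
(a hypothetical call site, not part of this patch), a check like the
one in generic XDP builds either way:

    /* Builds with CONFIG_NET_REDIRECT=y or =n alike: when the toggle
     * is off, skb_is_redirected() is constant false and the compiler
     * drops the branch entirely.
     */
    if (skb_is_redirected(skb))
            return XDP_PASS;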

This patch adds skb_set_redirected(), which sets the redirected bit on
the skbuff, records whether the packet was redirected from ingress, and
resets the timestamp (the timestamp reset was originally missing in the
netfilter bugfix).
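
For illustration, a condensed sketch of a caller after this patch (it
mirrors the act_mirred change below; surrounding error handling
omitted):

    /* skb2 is the clone being redirected; passing tc_at_ingress lets
     * the helper record the direction and zero the timestamp for
     * packets that will re-enter the stack from ingress.
     */
    skb_set_redirected(skb2, skb2->tc_at_ingress);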

Fixes: bcfabee1 ("netfilter: nft_fwd_netdev: allow to redirect to ifb via ingress")
Reported-by: noreply@ellerman.id.au
Reported-by: Geert Uytterhoeven <geert@linux-m68k.org>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 428c4913
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -149,6 +149,7 @@ config NET_FC
 config IFB
 	tristate "Intermediate Functional Block support"
 	depends on NET_CLS_ACT
+	select NET_REDIRECT
 	---help---
 	  This is an intermediate driver that allows sharing of
 	  resources.
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -75,7 +75,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
 	}
 
 	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
-		skb->tc_redirected = 0;
+		skb->redirected = 0;
 		skb->tc_skip_classify = 1;
 
 		u64_stats_update_begin(&txp->tsync);
@@ -96,7 +96,7 @@ static void ifb_ri_tasklet(unsigned long _txp)
 		rcu_read_unlock();
 		skb->skb_iif = txp->dev->ifindex;
 
-		if (!skb->tc_from_ingress) {
+		if (!skb->from_ingress) {
 			dev_queue_xmit(skb);
 		} else {
 			skb_pull_rcsum(skb, skb->mac_len);
@@ -243,7 +243,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
 	txp->rx_bytes += skb->len;
 	u64_stats_update_end(&txp->rsync);
 
-	if (!skb->tc_redirected || !skb->skb_iif) {
+	if (!skb->redirected || !skb->skb_iif) {
 		dev_kfree_skb(skb);
 		dev->stats.rx_dropped++;
 		return NETDEV_TX_OK;
--- a/drivers/net/wireguard/queueing.h
+++ b/drivers/net/wireguard/queueing.h
@@ -100,8 +100,8 @@ static inline void wg_reset_packet(struct sk_buff *skb)
 	skb->dev = NULL;
 #ifdef CONFIG_NET_SCHED
 	skb->tc_index = 0;
-	skb_reset_tc(skb);
 #endif
+	skb_reset_redirect(skb);
 	skb->hdr_len = skb_headroom(skb);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -645,8 +645,8 @@ typedef unsigned char *sk_buff_data_t;
  *	@offload_l3_fwd_mark: Packet was L3-forwarded in hardware
  *	@tc_skip_classify: do not classify packet. set by IFB device
  *	@tc_at_ingress: used within tc_classify to distinguish in/egress
- *	@tc_redirected: packet was redirected by a tc action
- *	@tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ *	@redirected: packet was redirected by packet classifier
+ *	@from_ingress: packet was redirected from the ingress path
  *	@peeked: this packet has been seen already, so stats have been
  *		done for it, don't do them again
  *	@nf_trace: netfilter packet trace flag
@@ -848,8 +848,10 @@ struct sk_buff {
 #ifdef CONFIG_NET_CLS_ACT
 	__u8			tc_skip_classify:1;
 	__u8			tc_at_ingress:1;
-	__u8			tc_redirected:1;
-	__u8			tc_from_ingress:1;
+#endif
+#ifdef CONFIG_NET_REDIRECT
+	__u8			redirected:1;
+	__u8			from_ingress:1;
 #endif
 #ifdef CONFIG_TLS_DEVICE
 	__u8			decrypted:1;
@@ -4579,5 +4581,31 @@ static inline __wsum lco_csum(struct sk_buff *skb)
 	return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
 }
 
+static inline bool skb_is_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	return skb->redirected;
+#else
+	return false;
+#endif
+}
+
+static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 1;
+	skb->from_ingress = from_ingress;
+	if (skb->from_ingress)
+		skb->tstamp = 0;
+#endif
+}
+
+static inline void skb_reset_redirect(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+	skb->redirected = 0;
+#endif
+}
+
 #endif	/* __KERNEL__ */
 #endif	/* _LINUX_SKBUFF_H */
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -675,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
 			       const struct qdisc_size_table *stab);
 int skb_do_redirect(struct sk_buff *);
 
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
-	return skb->tc_redirected;
-#else
-	return false;
-#endif
-}
-
 static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
 {
 #ifdef CONFIG_NET_CLS_ACT
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -52,6 +52,9 @@ config NET_INGRESS
 config NET_EGRESS
 	bool
 
+config NET_REDIRECT
+	bool
+
 config SKB_EXTENSIONS
 	bool
 
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4516,7 +4516,7 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
 	/* Reinjected packets coming from act_mirred or similar should
 	 * not get XDP generic processing.
 	 */
-	if (skb_is_tc_redirected(skb))
+	if (skb_is_redirected(skb))
 		return XDP_PASS;
 
 	/* XDP packets must be linear and must have sufficient headroom
@@ -5063,7 +5063,7 @@ static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc,
 		goto out;
 	}
 #endif
-	skb_reset_tc(skb);
+	skb_reset_redirect(skb);
 skip_classify:
 	if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
 		goto drop;
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3362,7 +3362,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
 				/* skb was 'freed' by stack, so clean few
 				 * bits and reuse it
 				 */
-				skb_reset_tc(skb);
+				skb_reset_redirect(skb);
 			} while (--burst > 0);
 			goto out; /* Skips xmit_mode M_START_XMIT */
 		} else if (pkt_dev->xmit_mode == M_QUEUE_XMIT) {
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -28,9 +28,8 @@ static void nft_fwd_netdev_eval(const struct nft_expr *expr,
 	struct nft_fwd_netdev *priv = nft_expr_priv(expr);
 	int oif = regs->data[priv->sreg_dev];
 
-	/* These are used by ifb only. */
-	pkt->skb->tc_redirected = 1;
-	pkt->skb->tc_from_ingress = 1;
+	/* This is used by ifb only. */
+	skb_set_redirected(pkt->skb, true);
 
 	nf_fwd_netdev_egress(pkt, oif);
 	regs->verdict.code = NF_STOLEN;
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -284,10 +284,8 @@ static int tcf_mirred_act(struct sk_buff *skb, const struct tc_action *a,
 
 	/* mirror is always swallowed */
 	if (is_redirect) {
-		skb2->tc_redirected = 1;
-		skb2->tc_from_ingress = skb2->tc_at_ingress;
-		if (skb2->tc_from_ingress)
-			skb2->tstamp = 0;
+		skb_set_redirected(skb2, skb2->tc_at_ingress);
+
 		/* let's the caller reinsert the packet, if possible */
 		if (use_reinsert) {
 			res->ingress = want_ingress;