Commit f9d1846f authored by David S. Miller

Merge branch 'cxgb4-tc-offload'

Rahul Lakkireddy says:

====================
cxgb4: add support for offloading TC u32 filters

This series of patches adds support for offloading TC u32 filters onto
Chelsio NICs.

Patch 1 moves the existing common filter code into separate files
in order to provide a common API for performing packet classification
and filtering on Chelsio NICs.

Patch 2 enables filters for normal NIC configuration and implements
a common API for setting and deleting filters.

Patches 3-5 add support for TC u32 offload via ndo_setup_tc.

---
v3:

Based on review and suggestions from David Miller <davem@davemloft.net>:
- Fixed all local variable declarations by ordering them longest line
  first, shortest line last.

v2:

Based on review and suggestions from Jiri Pirko <jiri@resnulli.us>:
- Replaced macros S and U with appropriate static helper functions.
- Moved the completion code for setting and deleting filters into the
  respective functions cxgb4_set_filter() and cxgb4_del_filter(), and
  renamed the original functions to __cxgb4_set_filter() and
  __cxgb4_del_filter() for use when synchronization is not required
  (a minimal sketch of the synchronous wrapper follows the sign-off
  below).
- Dropped debugfs patch.
- Merged code for inserting and deleting u32 filters into a single
  patch.
- Reworked and fixed bugs with traversing the actions list.
- Removed all unnecessary extra ().
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
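
The v2 change above splits each filter operation into an asynchronous core
and a synchronous wrapper. A minimal sketch of the wrapper pattern, assuming
the struct filter_ctx completion semantics declared in cxgb4_uld.h
(illustrative, not the verbatim patch contents):

int cxgb4_set_filter(struct net_device *dev, int filter_id,
		     struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	init_completion(&ctx.completion);

	ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
	if (ret)
		return ret;

	/* Wait for the firmware reply, delivered through filter_rpl(). */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	return ctx.result;
}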
@@ -4,7 +4,7 @@
obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o
cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o clip_tbl.o cxgb4_ethtool.o cxgb4_uld.o sched.o cxgb4_filter.o cxgb4_tc_u32.o
cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
cxgb4-$(CONFIG_CHELSIO_T4_FCOE) += cxgb4_fcoe.o
cxgb4-$(CONFIG_DEBUG_FS) += cxgb4_debugfs.o
@@ -851,6 +851,9 @@ struct adapter {
spinlock_t stats_lock;
spinlock_t win0_lock ____cacheline_aligned_in_smp;
/* TC u32 offload */
struct cxgb4_tc_u32_table *tc_u32;
};
/* Support for "sched-class" command to allow a TX Scheduling Class to be
@@ -1025,6 +1028,32 @@ enum {
VLAN_REWRITE
};
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware or the
* firmware command. The use of bit-field structure elements is purely to
* remind ourselves of the field size limitations and save memory in the case
* where the filter table is large.
*/
struct filter_entry {
/* Administrative fields for filter. */
u32 valid:1; /* filter allocated and valid */
u32 locked:1; /* filter is administratively locked */
u32 pending:1; /* filter action is pending firmware reply */
u32 smtidx:8; /* Source MAC Table index for smac */
struct filter_ctx *ctx; /* Caller's completion hook */
struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
struct net_device *dev; /* Associated net device */
u32 tid; /* This will store the actual tid */
/* The filter itself. Most of this is a straight copy of information
* provided by the extended ioctl(). Some fields are translated to
* internal forms -- for instance the Ingress Queue ID passed in from
* the ioctl() is translated into the Absolute Ingress Queue ID.
*/
struct ch_filter_specification fs;
};
static inline int is_offload(const struct adapter *adap)
{
return adap->params.offload;
......
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXGB4_FILTER_H
#define __CXGB4_FILTER_H
#include "t4_msg.h"
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl);
void clear_filter(struct adapter *adap, struct filter_entry *f);
int set_filter_wr(struct adapter *adapter, int fidx);
int delete_filter(struct adapter *adapter, unsigned int fidx);
int writable_filter(struct filter_entry *f);
void clear_all_filters(struct adapter *adapter);
#endif /* __CXGB4_FILTER_H */
@@ -67,6 +67,7 @@
#include <linux/crash_dump.h>
#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "t4_regs.h"
#include "t4_values.h"
#include "t4_msg.h"
@@ -77,6 +78,7 @@
#include "clip_tbl.h"
#include "l2t.h"
#include "sched.h"
#include "cxgb4_tc_u32.h"
char cxgb4_driver_name[] = KBUILD_MODNAME;
@@ -87,30 +89,6 @@ char cxgb4_driver_name[] = KBUILD_MODNAME;
const char cxgb4_driver_version[] = DRV_VERSION;
#define DRV_DESC "Chelsio T4/T5/T6 Network Driver"
/* Host shadow copy of ingress filter entry. This is in host native format
* and doesn't match the ordering or bit order, etc. of the hardware or the
* firmware command. The use of bit-field structure elements is purely to
* remind ourselves of the field size limitations and save memory in the case
* where the filter table is large.
*/
struct filter_entry {
/* Administrative fields for filter. */
u32 valid:1; /* filter allocated and valid */
u32 locked:1; /* filter is administratively locked */
u32 pending:1; /* filter action is pending firmware reply */
u32 smtidx:8; /* Source MAC Table index for smac */
struct l2t_entry *l2t; /* Layer Two Table entry for dmac */
/* The filter itself. Most of this is a straight copy of information
* provided by the extended ioctl(). Some fields are translated to
* internal forms -- for instance the Ingress Queue ID passed in from
* the ioctl() is translated into the Absolute Ingress Queue ID.
*/
struct ch_filter_specification fs;
};
#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -527,66 +505,6 @@ static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
}
#endif /* CONFIG_CHELSIO_T4_DCB */
/* Clear a filter and release any of its resources that we own. This also
* clears the filter's "pending" status.
*/
static void clear_filter(struct adapter *adap, struct filter_entry *f)
{
/* If the new or old filter has loopback rewriting rules then we'll
* need to free any existing Layer Two Table (L2T) entries of the old
* filter rule. The firmware will handle freeing up any Source MAC
* Table (SMT) entries used for rewriting Source MAC Addresses in
* loopback rules.
*/
if (f->l2t)
cxgb4_l2t_release(f->l2t);
/* The zeroing of the filter rule below clears the filter valid,
* pending, locked flags, l2t pointer, etc. so it's all we need for
* this operation.
*/
memset(f, 0, sizeof(*f));
}
/* Handle a filter write/deletion reply.
*/
static void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
unsigned int idx = GET_TID(rpl);
unsigned int nidx = idx - adap->tids.ftid_base;
unsigned int ret;
struct filter_entry *f;
if (idx >= adap->tids.ftid_base && nidx <
(adap->tids.nftids + adap->tids.nsftids)) {
idx = nidx;
ret = TCB_COOKIE_G(rpl->cookie);
f = &adap->tids.ftid_tab[idx];
if (ret == FW_FILTER_WR_FLT_DELETED) {
/* Clear the filter when we get confirmation from the
* hardware that the filter has been deleted.
*/
clear_filter(adap, f);
} else if (ret == FW_FILTER_WR_SMT_TBL_FULL) {
dev_err(adap->pdev_dev, "filter %u setup failed due to full SMT\n",
idx);
clear_filter(adap, f);
} else if (ret == FW_FILTER_WR_FLT_ADDED) {
f->smtidx = (be64_to_cpu(rpl->oldval) >> 24) & 0xff;
f->pending = 0; /* asynchronous setup completed */
f->valid = 1;
} else {
/* Something went wrong. Issue a warning about the
* problem and clear everything out.
*/
dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
idx, ret);
clear_filter(adap, f);
}
}
}
/* Response queue handler for the FW event queue.
*/
static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
@@ -1026,151 +944,6 @@ void t4_free_mem(void *addr)
kvfree(addr);
}
/* Send a Work Request to write the filter at a specified index. We construct
* a Firmware Filter Work Request to have the work done and put the indicated
* filter into "pending" mode which will prevent any further actions against
* it till we get a reply from the firmware on the completion status of the
* request.
*/
static int set_filter_wr(struct adapter *adapter, int fidx)
{
struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct sk_buff *skb;
struct fw_filter_wr *fwr;
unsigned int ftid;
skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
if (!skb)
return -ENOMEM;
/* If the new filter requires loopback Destination MAC and/or VLAN
* rewriting then we need to allocate a Layer 2 Table (L2T) entry for
* the filter.
*/
if (f->fs.newdmac || f->fs.newvlan) {
/* allocate L2T entry for new filter */
f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
f->fs.eport, f->fs.dmac);
if (f->l2t == NULL) {
kfree_skb(skb);
return -ENOMEM;
}
}
ftid = adapter->tids.ftid_base + fidx;
fwr = (struct fw_filter_wr *)__skb_put(skb, sizeof(*fwr));
memset(fwr, 0, sizeof(*fwr));
/* It would be nice to put most of the following in t4_hw.c but most
* of the work is translating the cxgbtool ch_filter_specification
* into the Work Request and the definition of that structure is
* currently in cxgbtool.h which isn't appropriate to pull into the
* common code. We may eventually try to come up with a more neutral
* filter specification structure but for now it's easiest to simply
* put this fairly direct code in line ...
*/
fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr)/16));
fwr->tid_to_iq =
htonl(FW_FILTER_WR_TID_V(ftid) |
FW_FILTER_WR_RQTYPE_V(f->fs.type) |
FW_FILTER_WR_NOREPLY_V(0) |
FW_FILTER_WR_IQ_V(f->fs.iq));
fwr->del_filter_to_l2tix =
htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
f->fs.newvlan == VLAN_REWRITE) |
FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
FW_FILTER_WR_PRIO_V(f->fs.prio) |
FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
fwr->ethtype = htons(f->fs.val.ethtype);
fwr->ethtypem = htons(f->fs.mask.ethtype);
fwr->frag_to_ovlan_vldm =
(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
fwr->smac_sel = 0;
fwr->rx_chan_rx_rpl_iq =
htons(FW_FILTER_WR_RX_CHAN_V(0) |
FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
fwr->maci_to_matchtypem =
htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
FW_FILTER_WR_PORT_V(f->fs.val.iport) |
FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
fwr->ptcl = f->fs.val.proto;
fwr->ptclm = f->fs.mask.proto;
fwr->ttyp = f->fs.val.tos;
fwr->ttypm = f->fs.mask.tos;
fwr->ivlan = htons(f->fs.val.ivlan);
fwr->ivlanm = htons(f->fs.mask.ivlan);
fwr->ovlan = htons(f->fs.val.ovlan);
fwr->ovlanm = htons(f->fs.mask.ovlan);
memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
fwr->lp = htons(f->fs.val.lport);
fwr->lpm = htons(f->fs.mask.lport);
fwr->fp = htons(f->fs.val.fport);
fwr->fpm = htons(f->fs.mask.fport);
if (f->fs.newsmac)
memcpy(fwr->sma, f->fs.smac, sizeof(fwr->sma));
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
*/
f->pending = 1;
set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
t4_ofld_send(adapter, skb);
return 0;
}
/* Delete the filter at a specified index.
*/
static int del_filter_wr(struct adapter *adapter, int fidx)
{
struct filter_entry *f = &adapter->tids.ftid_tab[fidx];
struct sk_buff *skb;
struct fw_filter_wr *fwr;
unsigned int len, ftid;
len = sizeof(*fwr);
ftid = adapter->tids.ftid_base + fidx;
skb = alloc_skb(len, GFP_KERNEL);
if (!skb)
return -ENOMEM;
fwr = (struct fw_filter_wr *)__skb_put(skb, len);
t4_mk_filtdelwr(ftid, fwr, adapter->sge.fw_evtq.abs_id);
/* Mark the filter as "pending" and ship off the Filter Work Request.
* When we get the Work Request Reply we'll clear the pending status.
*/
f->pending = 1;
t4_mgmt_tx(adapter, skb);
return 0;
}
static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -1552,19 +1325,22 @@ EXPORT_SYMBOL(cxgb4_remove_tid);
*/
static int tid_init(struct tid_info *t)
{
size_t size;
unsigned int stid_bmap_size;
unsigned int natids = t->natids;
struct adapter *adap = container_of(t, struct adapter, tids);
unsigned int max_ftids = t->nftids + t->nsftids;
unsigned int natids = t->natids;
unsigned int stid_bmap_size;
unsigned int ftid_bmap_size;
size_t size;
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
ftid_bmap_size = BITS_TO_LONGS(t->nftids);
size = t->ntids * sizeof(*t->tid_tab) +
natids * sizeof(*t->atid_tab) +
t->nstids * sizeof(*t->stid_tab) +
t->nsftids * sizeof(*t->stid_tab) +
stid_bmap_size * sizeof(long) +
t->nftids * sizeof(*t->ftid_tab) +
t->nsftids * sizeof(*t->ftid_tab);
max_ftids * sizeof(*t->ftid_tab) +
ftid_bmap_size * sizeof(long);
t->tid_tab = t4_alloc_mem(size);
if (!t->tid_tab)
@@ -1574,8 +1350,10 @@ static int tid_init(struct tid_info *t)
t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids + t->nsftids];
t->ftid_tab = (struct filter_entry *)&t->stid_bmap[stid_bmap_size];
t->ftid_bmap = (unsigned long *)&t->ftid_tab[max_ftids];
spin_lock_init(&t->stid_lock);
spin_lock_init(&t->atid_lock);
spin_lock_init(&t->ftid_lock);
t->stids_in_use = 0;
t->sftids_in_use = 0;
@@ -1590,12 +1368,16 @@ static int tid_init(struct tid_info *t)
t->atid_tab[natids - 1].next = &t->atid_tab[natids];
t->afree = t->atid_tab;
}
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
/* Reserve stid 0 for T4/T5 adapters */
if (!t->stid_base &&
(CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
__set_bit(0, t->stid_bmap);
if (is_offload(adap)) {
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
/* Reserve stid 0 for T4/T5 adapters */
if (!t->stid_base &&
CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
__set_bit(0, t->stid_bmap);
}
bitmap_zero(t->ftid_bmap, t->nftids);
return 0;
}
@@ -2514,40 +2296,6 @@ static int cxgb_close(struct net_device *dev)
return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
*/
static int writable_filter(struct filter_entry *f)
{
if (f->locked)
return -EPERM;
if (f->pending)
return -EBUSY;
return 0;
}
/* Delete the filter at the specified index (if valid). This checks for all
 * the common problems with doing this, like the filter being locked,
 * currently pending in another operation, etc.
*/
static int delete_filter(struct adapter *adapter, unsigned int fidx)
{
struct filter_entry *f;
int ret;
if (fidx >= adapter->tids.nftids + adapter->tids.nsftids)
return -EINVAL;
f = &adapter->tids.ftid_tab[fidx];
ret = writable_filter(f);
if (ret)
return ret;
if (f->valid)
return del_filter_wr(adapter, fidx);
return 0;
}
int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
__be32 sip, __be16 sport, __be16 vlan,
unsigned int queue, unsigned char port, unsigned char mask)
@@ -2964,6 +2712,35 @@ static int cxgb_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
int cxgb_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
struct tc_to_netdev *tc)
{
struct port_info *pi = netdev2pinfo(dev);
struct adapter *adap = netdev2adap(dev);
if (!(adap->flags & FULL_INIT_DONE)) {
dev_err(adap->pdev_dev,
"Failed to setup tc on port %d. Link Down?\n",
pi->port_id);
return -EINVAL;
}
if (TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS) &&
tc->type == TC_SETUP_CLSU32) {
switch (tc->cls_u32->command) {
case TC_CLSU32_NEW_KNODE:
case TC_CLSU32_REPLACE_KNODE:
return cxgb4_config_knode(dev, proto, tc->cls_u32);
case TC_CLSU32_DELETE_KNODE:
return cxgb4_delete_knode(dev, proto, tc->cls_u32);
default:
return -EOPNOTSUPP;
}
}
return -EOPNOTSUPP;
}
static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_open = cxgb_open,
.ndo_stop = cxgb_close,
@@ -2987,6 +2764,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
.ndo_busy_poll = cxgb_busy_poll,
#endif
.ndo_set_tx_maxrate = cxgb_set_tx_maxrate,
.ndo_setup_tc = cxgb_setup_tc,
};
#ifdef CONFIG_PCI_IOV
@@ -4659,6 +4437,7 @@ static void free_some_resources(struct adapter *adapter)
t4_free_mem(adapter->l2t);
t4_cleanup_sched(adapter);
t4_free_mem(adapter->tids.tid_tab);
cxgb4_cleanup_tc_u32(adapter);
kfree(adapter->sge.egr_map);
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
@@ -5003,7 +4782,8 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM | NETIF_F_RXHASH |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_TC;
if (highdma)
netdev->hw_features |= NETIF_F_HIGHDMA;
netdev->features |= netdev->hw_features;
@@ -5087,10 +4867,16 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
i);
}
if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
if (tid_init(&adapter->tids) < 0) {
dev_warn(&pdev->dev, "could not allocate TID table, "
"continuing\n");
adapter->params.offload = 0;
} else {
adapter->tc_u32 = cxgb4_init_tc_u32(adapter,
CXGB4_MAX_LINK_HANDLE);
if (!adapter->tc_u32)
dev_warn(&pdev->dev,
"could not offload tc u32, continuing\n");
}
if (is_offload(adapter)) {
@@ -5274,13 +5060,7 @@ static void remove_one(struct pci_dev *pdev)
/* If we allocated filters, free up state associated with any
* valid filters ...
*/
if (adapter->tids.ftid_tab) {
struct filter_entry *f = &adapter->tids.ftid_tab[0];
for (i = 0; i < (adapter->tids.nftids +
adapter->tids.nsftids); i++, f++)
if (f->valid)
clear_filter(adapter, f);
}
clear_all_filters(adapter);
if (adapter->flags & FULL_INIT_DONE)
cxgb_down(adapter);
......
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include "cxgb4.h"
#include "cxgb4_tc_u32_parse.h"
#include "cxgb4_tc_u32.h"
/* Fill ch_filter_specification with parsed match value/mask pair. */
static int fill_match_fields(struct adapter *adap,
struct ch_filter_specification *fs,
struct tc_cls_u32_offload *cls,
const struct cxgb4_match_field *entry,
bool next_header)
{
unsigned int i, j;
u32 val, mask;
int off, err;
bool found;
for (i = 0; i < cls->knode.sel->nkeys; i++) {
off = cls->knode.sel->keys[i].off;
val = cls->knode.sel->keys[i].val;
mask = cls->knode.sel->keys[i].mask;
if (next_header) {
/* For next headers, parse only keys with offmask */
if (!cls->knode.sel->keys[i].offmask)
continue;
} else {
/* For the remaining, parse only keys without offmask */
if (cls->knode.sel->keys[i].offmask)
continue;
}
found = false;
for (j = 0; entry[j].val; j++) {
if (off == entry[j].off) {
found = true;
err = entry[j].val(fs, val, mask);
if (err)
return err;
break;
}
}
if (!found)
return -EINVAL;
}
return 0;
}
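/* Illustration (hypothetical key, not part of the original patch): a
 * u32 key for "IPv4 protocol == TCP" carries off 8, so the loop above
 * routes it to the off == 8 entry of cxgb4_ipv4_fields, where
 * cxgb4_fill_ipv4_proto() recovers the protocol byte from bits 16..23
 * of the 32-bit word.
 */
static inline struct tc_u32_key example_ipv4_tcp_key(void)
{
	struct tc_u32_key key = {
		.off  = 8,                 /* word holding ttl/proto/csum */
		.val  = htonl(0x00060000), /* protocol byte = IPPROTO_TCP */
		.mask = htonl(0x00ff0000), /* match only the proto byte */
	};

	return key;
}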
/* Fill ch_filter_specification with parsed action. */
static int fill_action_fields(struct adapter *adap,
struct ch_filter_specification *fs,
struct tc_cls_u32_offload *cls)
{
unsigned int num_actions = 0;
const struct tc_action *a;
struct tcf_exts *exts;
LIST_HEAD(actions);
exts = cls->knode.exts;
if (tc_no_actions(exts))
return -EINVAL;
tcf_exts_to_list(exts, &actions);
list_for_each_entry(a, &actions, list) {
/* Don't allow more than one action per rule. */
if (num_actions)
return -EINVAL;
/* Drop in hardware. */
if (is_tcf_gact_shot(a)) {
fs->action = FILTER_DROP;
num_actions++;
continue;
}
/* Redirect to the specified port in hardware. */
if (is_tcf_mirred_redirect(a)) {
struct net_device *n_dev;
unsigned int i, index;
bool found = false;
index = tcf_mirred_ifindex(a);
for_each_port(adap, i) {
n_dev = adap->port[i];
if (index == n_dev->ifindex) {
fs->action = FILTER_SWITCH;
fs->eport = i;
found = true;
break;
}
}
/* Interface doesn't belong to any port of
* the underlying hardware.
*/
if (!found)
return -EINVAL;
num_actions++;
continue;
}
/* Unsupported action. */
return -EINVAL;
}
return 0;
}
int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
struct tc_cls_u32_offload *cls)
{
const struct cxgb4_match_field *start, *link_start = NULL;
struct adapter *adapter = netdev2adap(dev);
struct ch_filter_specification fs;
struct cxgb4_tc_u32_table *t;
struct cxgb4_link *link;
unsigned int filter_id;
u32 uhtid, link_uhtid;
bool is_ipv6 = false;
int ret;
if (!can_tc_u32_offload(dev))
return -EOPNOTSUPP;
if (protocol != htons(ETH_P_IP) && protocol != htons(ETH_P_IPV6))
return -EOPNOTSUPP;
/* Fetch the location to insert the filter. */
filter_id = cls->knode.handle & 0xFFFFF;
if (filter_id > adapter->tids.nftids) {
dev_err(adapter->pdev_dev,
"Location %d out of range for insertion. Max: %d\n",
filter_id, adapter->tids.nftids);
return -ERANGE;
}
t = adapter->tc_u32;
uhtid = TC_U32_USERHTID(cls->knode.handle);
link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
* or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
/* Ensure link handle uhtid is sane, if specified. */
if (link_uhtid >= t->size)
return -EINVAL;
memset(&fs, 0, sizeof(fs));
if (protocol == htons(ETH_P_IPV6)) {
start = cxgb4_ipv6_fields;
is_ipv6 = true;
} else {
start = cxgb4_ipv4_fields;
is_ipv6 = false;
}
if (uhtid != 0x800) {
/* Link must exist from root node before insertion. */
if (!t->table[uhtid - 1].link_handle)
return -EINVAL;
/* Link must have a valid supported next header. */
link_start = t->table[uhtid - 1].match_field;
if (!link_start)
return -EINVAL;
}
/* Parse links and record them for subsequent jumps to valid
* next headers.
*/
if (link_uhtid) {
const struct cxgb4_next_header *next;
bool found = false;
unsigned int i, j;
u32 val, mask;
int off;
if (t->table[link_uhtid - 1].link_handle) {
dev_err(adapter->pdev_dev,
"Link handle exists for: 0x%x\n",
link_uhtid);
return -EINVAL;
}
next = is_ipv6 ? cxgb4_ipv6_jumps : cxgb4_ipv4_jumps;
/* Try to find matches that allow jumps to next header. */
for (i = 0; next[i].jump; i++) {
if (next[i].offoff != cls->knode.sel->offoff ||
next[i].shift != cls->knode.sel->offshift ||
next[i].mask != cls->knode.sel->offmask ||
next[i].offset != cls->knode.sel->off)
continue;
/* Found a possible candidate. Find a key that
* matches the corresponding offset, value, and
* mask to jump to next header.
*/
for (j = 0; j < cls->knode.sel->nkeys; j++) {
off = cls->knode.sel->keys[j].off;
val = cls->knode.sel->keys[j].val;
mask = cls->knode.sel->keys[j].mask;
if (next[i].match_off == off &&
next[i].match_val == val &&
next[i].match_mask == mask) {
found = true;
break;
}
}
if (!found)
continue; /* Try next candidate. */
/* Candidate to jump to next header found.
* Translate all keys to internal specification
* and store them in jump table. This spec is copied
* later to set the actual filters.
*/
ret = fill_match_fields(adapter, &fs, cls,
start, false);
if (ret)
goto out;
link = &t->table[link_uhtid - 1];
link->match_field = next[i].jump;
link->link_handle = cls->knode.handle;
memcpy(&link->fs, &fs, sizeof(fs));
break;
}
/* No candidate found to jump to next header. */
if (!found)
return -EINVAL;
return 0;
}
/* Fill ch_filter_specification match fields to be shipped to hardware.
* Copy the linked spec (if any) first. And then update the spec as
* needed.
*/
if (uhtid != 0x800 && t->table[uhtid - 1].link_handle) {
/* Copy linked ch_filter_specification */
memcpy(&fs, &t->table[uhtid - 1].fs, sizeof(fs));
ret = fill_match_fields(adapter, &fs, cls,
link_start, true);
if (ret)
goto out;
}
ret = fill_match_fields(adapter, &fs, cls, start, false);
if (ret)
goto out;
/* Fill ch_filter_specification action fields to be shipped to
* hardware.
*/
ret = fill_action_fields(adapter, &fs, cls);
if (ret)
goto out;
/* The filter spec has been completely built from the info
* provided from u32. We now set some default fields in the
* spec for sanity.
*/
/* Match only packets coming from the ingress port where this
* filter will be created.
*/
fs.val.iport = netdev2pinfo(dev)->port_id;
fs.mask.iport = ~0;
/* Enable filter hit counts. */
fs.hitcnts = 1;
/* Set type of filter - IPv6 or IPv4 */
fs.type = is_ipv6 ? 1 : 0;
/* Set the filter */
ret = cxgb4_set_filter(dev, filter_id, &fs);
if (ret)
goto out;
/* If this is a linked bucket, then set the corresponding
* entry in the bitmap to mark it as belonging to this linked
* bucket.
*/
if (uhtid != 0x800 && t->table[uhtid - 1].link_handle)
set_bit(filter_id, t->table[uhtid - 1].tid_map);
out:
return ret;
}
int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
struct tc_cls_u32_offload *cls)
{
struct adapter *adapter = netdev2adap(dev);
unsigned int filter_id, max_tids, i, j;
struct cxgb4_link *link = NULL;
struct cxgb4_tc_u32_table *t;
u32 handle, uhtid;
int ret;
if (!can_tc_u32_offload(dev))
return -EOPNOTSUPP;
/* Fetch the location to delete the filter. */
filter_id = cls->knode.handle & 0xFFFFF;
if (filter_id > adapter->tids.nftids) {
dev_err(adapter->pdev_dev,
"Location %d out of range for deletion. Max: %d\n",
filter_id, adapter->tids.nftids);
return -ERANGE;
}
t = adapter->tc_u32;
handle = cls->knode.handle;
uhtid = TC_U32_USERHTID(cls->knode.handle);
/* Ensure that uhtid is either root u32 (i.e. 0x800)
* or a valid linked bucket.
*/
if (uhtid != 0x800 && uhtid >= t->size)
return -EINVAL;
/* Delete the specified filter */
if (uhtid != 0x800) {
link = &t->table[uhtid - 1];
if (!link->link_handle)
return -EINVAL;
if (!test_bit(filter_id, link->tid_map))
return -EINVAL;
}
ret = cxgb4_del_filter(dev, filter_id);
if (ret)
goto out;
if (link)
clear_bit(filter_id, link->tid_map);
/* If a link is being deleted, then delete all filters
* associated with the link.
*/
max_tids = adapter->tids.nftids;
for (i = 0; i < t->size; i++) {
link = &t->table[i];
if (link->link_handle == handle) {
for (j = 0; j < max_tids; j++) {
if (!test_bit(j, link->tid_map))
continue;
ret = __cxgb4_del_filter(dev, j, NULL);
if (ret)
goto out;
clear_bit(j, link->tid_map);
}
/* Clear the link state */
link->match_field = NULL;
link->link_handle = 0;
memset(&link->fs, 0, sizeof(link->fs));
break;
}
}
out:
return ret;
}
void cxgb4_cleanup_tc_u32(struct adapter *adap)
{
struct cxgb4_tc_u32_table *t;
unsigned int i;
if (!adap->tc_u32)
return;
/* Free up all allocated memory. */
t = adap->tc_u32;
for (i = 0; i < t->size; i++) {
struct cxgb4_link *link = &t->table[i];
t4_free_mem(link->tid_map);
}
t4_free_mem(adap->tc_u32);
}
struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
unsigned int size)
{
struct cxgb4_tc_u32_table *t;
unsigned int i;
if (!size)
return NULL;
t = t4_alloc_mem(sizeof(*t) +
(size * sizeof(struct cxgb4_link)));
if (!t)
return NULL;
t->size = size;
for (i = 0; i < t->size; i++) {
struct cxgb4_link *link = &t->table[i];
unsigned int bmap_size;
unsigned int max_tids;
max_tids = adap->tids.nftids;
bmap_size = BITS_TO_LONGS(max_tids);
link->tid_map = t4_alloc_mem(sizeof(unsigned long) * bmap_size);
if (!link->tid_map)
goto out_no_mem;
bitmap_zero(link->tid_map, max_tids);
}
return t;
out_no_mem:
for (i = 0; i < t->size; i++) {
struct cxgb4_link *link = &t->table[i];
if (link->tid_map)
t4_free_mem(link->tid_map);
}
if (t)
t4_free_mem(t);
return NULL;
}
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXGB4_TC_U32_H
#define __CXGB4_TC_U32_H
#include <net/pkt_cls.h>
#define CXGB4_MAX_LINK_HANDLE 32
static inline bool can_tc_u32_offload(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
return (dev->features & NETIF_F_HW_TC) && adap->tc_u32 ? true : false;
}
int cxgb4_config_knode(struct net_device *dev, __be16 protocol,
struct tc_cls_u32_offload *cls);
int cxgb4_delete_knode(struct net_device *dev, __be16 protocol,
struct tc_cls_u32_offload *cls);
void cxgb4_cleanup_tc_u32(struct adapter *adapter);
struct cxgb4_tc_u32_table *cxgb4_init_tc_u32(struct adapter *adap,
unsigned int size);
#endif /* __CXGB4_TC_U32_H */
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __CXGB4_TC_U32_PARSE_H
#define __CXGB4_TC_U32_PARSE_H
struct cxgb4_match_field {
int off; /* Offset from the beginning of the header to match */
/* Fill the value/mask pair in the spec if matched */
int (*val)(struct ch_filter_specification *f, u32 val, u32 mask);
};
/* IPv4 match fields */
static inline int cxgb4_fill_ipv4_tos(struct ch_filter_specification *f,
u32 val, u32 mask)
{
f->val.tos = (ntohl(val) >> 16) & 0x000000FF;
f->mask.tos = (ntohl(mask) >> 16) & 0x000000FF;
return 0;
}
static inline int cxgb4_fill_ipv4_frag(struct ch_filter_specification *f,
u32 val, u32 mask)
{
u32 mask_val;
u8 frag_val;
frag_val = (ntohl(val) >> 13) & 0x00000007;
mask_val = ntohl(mask) & 0x0000FFFF;
if (frag_val == 0x1 && mask_val != 0x3FFF) { /* MF set */
f->val.frag = 1;
f->mask.frag = 1;
} else if (frag_val == 0x2 && mask_val != 0x3FFF) { /* DF set */
f->val.frag = 0;
f->mask.frag = 1;
} else {
return -EINVAL;
}
return 0;
}
static inline int cxgb4_fill_ipv4_proto(struct ch_filter_specification *f,
u32 val, u32 mask)
{
f->val.proto = (ntohl(val) >> 16) & 0x000000FF;
f->mask.proto = (ntohl(mask) >> 16) & 0x000000FF;
return 0;
}
static inline int cxgb4_fill_ipv4_src_ip(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.fip[0], &val, sizeof(u32));
memcpy(&f->mask.fip[0], &mask, sizeof(u32));
return 0;
}
static inline int cxgb4_fill_ipv4_dst_ip(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.lip[0], &val, sizeof(u32));
memcpy(&f->mask.lip[0], &mask, sizeof(u32));
return 0;
}
static const struct cxgb4_match_field cxgb4_ipv4_fields[] = {
{ .off = 0, .val = cxgb4_fill_ipv4_tos },
{ .off = 4, .val = cxgb4_fill_ipv4_frag },
{ .off = 8, .val = cxgb4_fill_ipv4_proto },
{ .off = 12, .val = cxgb4_fill_ipv4_src_ip },
{ .off = 16, .val = cxgb4_fill_ipv4_dst_ip },
{ .val = NULL }
};
/* IPv6 match fields */
static inline int cxgb4_fill_ipv6_tos(struct ch_filter_specification *f,
u32 val, u32 mask)
{
f->val.tos = (ntohl(val) >> 20) & 0x000000FF;
f->mask.tos = (ntohl(mask) >> 20) & 0x000000FF;
return 0;
}
static inline int cxgb4_fill_ipv6_proto(struct ch_filter_specification *f,
u32 val, u32 mask)
{
f->val.proto = (ntohl(val) >> 8) & 0x000000FF;
f->mask.proto = (ntohl(mask) >> 8) & 0x000000FF;
return 0;
}
static inline int cxgb4_fill_ipv6_src_ip0(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.fip[0], &val, sizeof(u32));
memcpy(&f->mask.fip[0], &mask, sizeof(u32));
return 0;
}
static inline int cxgb4_fill_ipv6_src_ip1(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.fip[4], &val, sizeof(u32));
memcpy(&f->mask.fip[4], &mask, sizeof(u32));
return 0;
}
static inline int cxgb4_fill_ipv6_src_ip2(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.fip[8], &val, sizeof(u32));
memcpy(&f->mask.fip[8], &mask, sizeof(u32));
return 0;
}
static inline int cxgb4_fill_ipv6_src_ip3(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.fip[12], &val, sizeof(u32));
memcpy(&f->mask.fip[12], &mask, sizeof(u32));
return 0;
}
static inline int cxgb4_fill_ipv6_dst_ip0(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.lip[0], &val, sizeof(u32));
memcpy(&f->mask.lip[0], &mask, sizeof(u32));
return 0;
}
static inline int cxgb4_fill_ipv6_dst_ip1(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.lip[4], &val, sizeof(u32));
memcpy(&f->mask.lip[4], &mask, sizeof(u32));
return 0;
}
static inline int cxgb4_fill_ipv6_dst_ip2(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.lip[8], &val, sizeof(u32));
memcpy(&f->mask.lip[8], &mask, sizeof(u32));
return 0;
}
static inline int cxgb4_fill_ipv6_dst_ip3(struct ch_filter_specification *f,
u32 val, u32 mask)
{
memcpy(&f->val.lip[12], &val, sizeof(u32));
memcpy(&f->mask.lip[12], &mask, sizeof(u32));
return 0;
}
static const struct cxgb4_match_field cxgb4_ipv6_fields[] = {
{ .off = 0, .val = cxgb4_fill_ipv6_tos },
{ .off = 4, .val = cxgb4_fill_ipv6_proto },
{ .off = 8, .val = cxgb4_fill_ipv6_src_ip0 },
{ .off = 12, .val = cxgb4_fill_ipv6_src_ip1 },
{ .off = 16, .val = cxgb4_fill_ipv6_src_ip2 },
{ .off = 20, .val = cxgb4_fill_ipv6_src_ip3 },
{ .off = 24, .val = cxgb4_fill_ipv6_dst_ip0 },
{ .off = 28, .val = cxgb4_fill_ipv6_dst_ip1 },
{ .off = 32, .val = cxgb4_fill_ipv6_dst_ip2 },
{ .off = 36, .val = cxgb4_fill_ipv6_dst_ip3 },
{ .val = NULL }
};
/* TCP/UDP match */
static inline int cxgb4_fill_l4_ports(struct ch_filter_specification *f,
u32 val, u32 mask)
{
f->val.fport = ntohl(val) >> 16;
f->mask.fport = ntohl(mask) >> 16;
f->val.lport = ntohl(val) & 0x0000FFFF;
f->mask.lport = ntohl(mask) & 0x0000FFFF;
return 0;
}
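/* Worked example (hypothetical values, not part of the original patch):
 * a single u32 key at offset 0 of the TCP/UDP header carries both
 * ports, source in the upper 16 bits and destination in the lower 16.
 * For val = htonl(80 << 16 | 8080) and mask = htonl(0xffff0000),
 * cxgb4_fill_l4_ports() above sets fs->val.fport = 80 and
 * fs->mask.fport = 0xffff, while fs->mask.lport stays 0 so the
 * destination port is left unmatched.
 */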
static const struct cxgb4_match_field cxgb4_tcp_fields[] = {
{ .off = 0, .val = cxgb4_fill_l4_ports },
{ .val = NULL }
};
static const struct cxgb4_match_field cxgb4_udp_fields[] = {
{ .off = 0, .val = cxgb4_fill_l4_ports },
{ .val = NULL }
};
struct cxgb4_next_header {
unsigned int offset; /* Offset to next header */
/* offoff, shift, and mask are applied to the header word at offoff,
 * together with the offset above, to locate the next header. Useful
 * when a header field's value selects the next header, such as the
 * IHL field in the IPv4 header.
 */
unsigned int offoff;
u32 shift;
u32 mask;
/* match criteria to make this jump */
unsigned int match_off;
u32 match_val;
u32 match_mask;
/* location of jump to make */
const struct cxgb4_match_field *jump;
};
/* Accept a rule with a jump to the transport-layer header, based on the
 * IHL field in the IPv4 header.
 */
static const struct cxgb4_next_header cxgb4_ipv4_jumps[] = {
{ .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
.match_off = 8, .match_val = 0x600, .match_mask = 0xFF00,
.jump = cxgb4_tcp_fields },
{ .offset = 0, .offoff = 0, .shift = 6, .mask = 0xF,
.match_off = 8, .match_val = 0x1100, .match_mask = 0xFF00,
.jump = cxgb4_udp_fields },
{ .jump = NULL }
};
/* Accept a rule with a jump directly past the 40 bytes of the fixed IPv6
 * header to get to the transport-layer header.
 */
static const struct cxgb4_next_header cxgb4_ipv6_jumps[] = {
{ .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
.match_off = 4, .match_val = 0x60000, .match_mask = 0xFF0000,
.jump = cxgb4_tcp_fields },
{ .offset = 0x28, .offoff = 0, .shift = 0, .mask = 0,
.match_off = 4, .match_val = 0x110000, .match_mask = 0xFF0000,
.jump = cxgb4_udp_fields },
{ .jump = NULL }
};
struct cxgb4_link {
const struct cxgb4_match_field *match_field; /* Next header */
struct ch_filter_specification fs; /* Match spec associated with link */
u32 link_handle; /* Knode handle associated with the link */
unsigned long *tid_map; /* Bitmap for filter tids */
};
struct cxgb4_tc_u32_table {
unsigned int size; /* number of entries in table */
struct cxgb4_link table[0]; /* Jump table */
};
#endif /* __CXGB4_TC_U32_PARSE_H */
/*
* This file is part of the Chelsio T4 Ethernet driver for Linux.
*
* Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
* Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -106,6 +106,7 @@ struct tid_info {
unsigned int atid_base;
struct filter_entry *ftid_tab;
unsigned long *ftid_bmap;
unsigned int nftids;
unsigned int ftid_base;
unsigned int aftid_base;
@@ -126,6 +127,8 @@ struct tid_info {
atomic_t tids_in_use;
/* TIDs in the HASH */
atomic_t hash_tids_in_use;
/* lock for setting/clearing filter bitmap */
spinlock_t ftid_lock;
};
static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
@@ -185,6 +188,27 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
unsigned int queue, bool ipv6);
/* Filter operation context to allow callers of cxgb4_set_filter() and
* cxgb4_del_filter() to wait for an asynchronous completion.
*/
struct filter_ctx {
struct completion completion; /* completion rendezvous */
void *closure; /* caller's opaque information */
int result; /* result of operation */
u32 tid; /* to store tid */
};
struct ch_filter_specification;
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs,
struct filter_ctx *ctx);
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
struct filter_ctx *ctx);
int cxgb4_set_filter(struct net_device *dev, int filter_id,
struct ch_filter_specification *fs);
int cxgb4_del_filter(struct net_device *dev, int filter_id);
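/* Usage sketch (illustrative, not part of the original patch): the
 * asynchronous variants take an optional context; passing a NULL ctx
 * tells the driver nobody is waiting for the firmware reply:
 *
 *	err = __cxgb4_del_filter(dev, filter_id, NULL);
 *
 * cxgb4_del_filter() and cxgb4_set_filter() wrap the same calls with
 * their own filter_ctx and block until filter_rpl() posts the result.
 */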
static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
{
skb_set_queue_mapping(skb, (queue << 1) | prio);
......