提交 a477605f 编写于 作者: D David S. Miller

Merge branch 'dpaa2-eth-add-PFC-support'

Ioana Ciornei says:

====================
dpaa2-eth: add PFC support

This patch set adds support for Priority Flow Control in DPAA2 Ethernet
devices.

The first patch makes the necessary changes so that multiple
traffic classes are configured. The dequeue priority
of the maximum 8 traffic classes is configured to be equal.
The second patch adds a static distribution to said traffic
classes based on the VLAN PCP field. In the future, this could be
extended through the .setapp() DCB callback for dynamic configuration.

Also, add support for the congestion group taildrop mechanism that
allows us to control the number of frames that can accumulate on a group
of Rx frame queues belonging to the same traffic class.

The basic subset of the DCB ops is implemented so that the user can
query the number of PFC capable traffic classes, their state and
reconfigure them if necessary.

Changes in v3:
 - add patches 6-7 which add the PFC functionality
 - patch 2/7: revert to explicitly cast mask to u16 * to not get into
   sparse warnings
Changes in v4:
 - really fix the sparse warnings in 2/7
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -9,6 +9,16 @@ config FSL_DPAA2_ETH
The driver manages network objects discovered on the Freescale
MC bus.
if FSL_DPAA2_ETH
config FSL_DPAA2_ETH_DCB
bool "Data Center Bridging (DCB) Support"
default n
depends on DCB
help
Enable Priority-Based Flow Control (PFC) support for DPAA2 Ethernet
devices.
endif
config FSL_DPAA2_PTP_CLOCK
tristate "Freescale DPAA2 PTP Clock"
depends on FSL_DPAA2_ETH && PTP_1588_CLOCK_QORIQ
......
......@@ -7,6 +7,7 @@ obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o
obj-$(CONFIG_FSL_DPAA2_PTP_CLOCK) += fsl-dpaa2-ptp.o
fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o
fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o
fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o
fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o
......
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/* Copyright 2020 NXP */
#include "dpaa2-eth.h"
/* dcbnl callback: report the current IEEE PFC configuration.
 * Returns an all-zero struct when PFC pause is not enabled on the link;
 * otherwise the cached per-TC state plus the number of PFC-capable TCs.
 */
static int dpaa2_eth_dcbnl_ieee_getpfc(struct net_device *net_dev,
				       struct ieee_pfc *pfc)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (!(priv->link_state.options & DPNI_LINK_OPT_PFC_PAUSE))
		return 0;

	*pfc = priv->pfc;
	pfc->pfc_cap = dpaa2_eth_tc_count(priv);

	return 0;
}
/* Check whether traffic class @tc is selected in the PFC enable bitmask */
static inline bool is_prio_enabled(u8 pfc_en, u8 tc)
{
	return (pfc_en >> tc) & 1;
}
/* Program per-TC congestion notification so the MAC emits PFC frames for
 * the traffic classes selected in @pfc_en and stays silent for the rest.
 * Returns 0 on success or the firmware error code.
 */
static int set_pfc_cn(struct dpaa2_eth_priv *priv, u8 pfc_en)
{
	struct dpni_congestion_notification_cfg cfg = {0};
	int tc, err;

	cfg.notification_mode = DPNI_CONG_OPT_FLOW_CONTROL;
	cfg.units = DPNI_CONGESTION_UNIT_FRAMES;
	cfg.message_iova = 0ULL;
	cfg.message_ctx = 0ULL;

	for (tc = 0; tc < dpaa2_eth_tc_count(priv); tc++) {
		/* Priorities left out of the pfc_en mask keep zero
		 * thresholds, which effectively disables generation of
		 * PFC frames for them
		 */
		u32 thresh_entry = 0, thresh_exit = 0;

		if (is_prio_enabled(pfc_en, tc)) {
			thresh_entry = DPAA2_ETH_CN_THRESH_ENTRY(priv);
			thresh_exit = DPAA2_ETH_CN_THRESH_EXIT(priv);
		}
		cfg.threshold_entry = thresh_entry;
		cfg.threshold_exit = thresh_exit;

		err = dpni_set_congestion_notification(priv->mc_io, 0,
						       priv->mc_token,
						       DPNI_QUEUE_RX, tc, &cfg);
		if (err) {
			netdev_err(priv->net_dev,
				   "dpni_set_congestion_notification failed\n");
			return err;
		}
	}

	return 0;
}
/* dcbnl callback: apply a new IEEE PFC configuration.
 * Updates the link PFC-pause option, programs per-TC congestion
 * notification thresholds, caches the new state and re-evaluates the
 * Rx taildrop setup. Returns 0 or a negative errno / firmware error.
 */
static int dpaa2_eth_dcbnl_ieee_setpfc(struct net_device *net_dev,
struct ieee_pfc *pfc)
{
struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
struct dpni_link_cfg link_cfg = {0};
bool tx_pause;
int err;

/* MBC and PFC delay are not configurable on this device */
if (pfc->mbc || pfc->delay)
return -EOPNOTSUPP;

/* If same PFC enabled mask, nothing to do */
if (priv->pfc.pfc_en == pfc->pfc_en)
return 0;

/* We allow PFC configuration even if it won't have any effect until
 * general pause frames are enabled
 */
tx_pause = dpaa2_eth_tx_pause_enabled(priv->link_state.options);
if (!dpaa2_eth_rx_pause_enabled(priv->link_state.options) || !tx_pause)
netdev_warn(net_dev, "Pause support must be enabled in order for PFC to work!\n");

/* Set or clear the PFC-pause link option depending on whether any
 * priority is enabled in the new mask
 */
link_cfg.rate = priv->link_state.rate;
link_cfg.options = priv->link_state.options;
if (pfc->pfc_en)
link_cfg.options |= DPNI_LINK_OPT_PFC_PAUSE;
else
link_cfg.options &= ~DPNI_LINK_OPT_PFC_PAUSE;
err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &link_cfg);
if (err) {
netdev_err(net_dev, "dpni_set_link_cfg failed\n");
return err;
}

/* Configure congestion notifications for the enabled priorities */
err = set_pfc_cn(priv, pfc->pfc_en);
if (err)
return err;

/* Cache the accepted configuration and adjust Rx taildrop, since the
 * congestion group taildrop policy depends on the PFC state
 */
memcpy(&priv->pfc, pfc, sizeof(priv->pfc));
priv->pfc_enabled = !!pfc->pfc_en;

dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);

return 0;
}
/* dcbnl callback: report the DCBX mode negotiated/stored for this device */
static u8 dpaa2_eth_dcbnl_getdcbx(struct net_device *net_dev)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 mode = priv->dcbx_mode;

	return mode;
}
/* dcbnl callback: changing the DCBX mode is not supported — succeed (0)
 * only when the requested mode matches the current one, fail (1) otherwise
 */
static u8 dpaa2_eth_dcbnl_setdcbx(struct net_device *net_dev, u8 mode)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);

	if (mode == priv->dcbx_mode)
		return 0;

	return 1;
}
/* dcbnl callback: answer DCB capability queries.
 * Always returns 0 (success); the capability value is written to @cap,
 * with false reported for unknown capability ids.
 */
static u8 dpaa2_eth_dcbnl_getcap(struct net_device *net_dev, int capid, u8 *cap)
{
	struct dpaa2_eth_priv *priv = netdev_priv(net_dev);
	u8 val = false;

	switch (capid) {
	case DCB_CAP_ATTR_PFC:
		val = true;
		break;
	case DCB_CAP_ATTR_PFC_TCS:
		/* Bitmap encoding: bit (n - 1) set means n TCs supported */
		val = 1 << (dpaa2_eth_tc_count(priv) - 1);
		break;
	case DCB_CAP_ATTR_DCBX:
		val = priv->dcbx_mode;
		break;
	}
	*cap = val;

	return 0;
}
/* DCB ops exposed to the dcbnl core: only the minimal IEEE PFC subset
 * (get/set PFC, DCBX mode query/set, capability query) is implemented
 */
const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops = {
.ieee_getpfc = dpaa2_eth_dcbnl_ieee_getpfc,
.ieee_setpfc = dpaa2_eth_dcbnl_ieee_setpfc,
.getdcbx = dpaa2_eth_dcbnl_getdcbx,
.setdcbx = dpaa2_eth_dcbnl_setdcbx,
.getcap = dpaa2_eth_dcbnl_getcap,
};
......@@ -81,8 +81,8 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
int i, err;
seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name);
seq_printf(file, "%s%16s%16s%16s%16s\n",
"VFQID", "CPU", "Type", "Frames", "Pending frames");
seq_printf(file, "%s%16s%16s%16s%16s%16s\n",
"VFQID", "CPU", "TC", "Type", "Frames", "Pending frames");
for (i = 0; i < priv->num_fqs; i++) {
fq = &priv->fq[i];
......@@ -90,9 +90,10 @@ static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset)
if (err)
fcnt = 0;
seq_printf(file, "%5d%16d%16s%16llu%16u\n",
seq_printf(file, "%5d%16d%16d%16s%16llu%16u\n",
fq->fqid,
fq->target_cpu,
fq->tc,
fq_type_to_str(fq),
fq->stats.frames,
fcnt);
......
......@@ -1287,31 +1287,67 @@ static void disable_ch_napi(struct dpaa2_eth_priv *priv)
}
}
static void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv, bool enable)
void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
bool tx_pause, bool pfc)
{
struct dpni_taildrop td = {0};
struct dpaa2_eth_fq *fq;
int i, err;
if (priv->rx_td_enabled == enable)
return;
/* FQ taildrop: threshold is in bytes, per frame queue. Enabled if
* flow control is disabled (as it might interfere with either the
* buffer pool depletion trigger for pause frames or with the group
* congestion trigger for PFC frames)
*/
td.enable = !tx_pause;
if (priv->rx_fqtd_enabled == td.enable)
goto set_cgtd;
td.enable = enable;
td.threshold = DPAA2_ETH_TAILDROP_THRESH;
td.threshold = DPAA2_ETH_FQ_TAILDROP_THRESH;
td.units = DPNI_CONGESTION_UNIT_BYTES;
for (i = 0; i < priv->num_fqs; i++) {
if (priv->fq[i].type != DPAA2_RX_FQ)
fq = &priv->fq[i];
if (fq->type != DPAA2_RX_FQ)
continue;
err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
DPNI_CP_QUEUE, DPNI_QUEUE_RX, 0,
priv->fq[i].flowid, &td);
DPNI_CP_QUEUE, DPNI_QUEUE_RX,
fq->tc, fq->flowid, &td);
if (err) {
netdev_err(priv->net_dev,
"dpni_set_taildrop() failed\n");
break;
"dpni_set_taildrop(FQ) failed\n");
return;
}
}
priv->rx_td_enabled = enable;
priv->rx_fqtd_enabled = td.enable;
set_cgtd:
/* Congestion group taildrop: threshold is in frames, per group
* of FQs belonging to the same traffic class
* Enabled if general Tx pause disabled or if PFCs are enabled
* (congestion group threshold for PFC generation is lower than the
* CG taildrop threshold, so it won't interfere with it; we also
* want frames in non-PFC enabled traffic classes to be kept in check)
*/
td.enable = !tx_pause || (tx_pause && pfc);
if (priv->rx_cgtd_enabled == td.enable)
return;
td.threshold = DPAA2_ETH_CG_TAILDROP_THRESH(priv);
td.units = DPNI_CONGESTION_UNIT_FRAMES;
for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
err = dpni_set_taildrop(priv->mc_io, 0, priv->mc_token,
DPNI_CP_GROUP, DPNI_QUEUE_RX,
i, 0, &td);
if (err) {
netdev_err(priv->net_dev,
"dpni_set_taildrop(CG) failed\n");
return;
}
}
priv->rx_cgtd_enabled = td.enable;
}
static int link_state_update(struct dpaa2_eth_priv *priv)
......@@ -1331,9 +1367,8 @@ static int link_state_update(struct dpaa2_eth_priv *priv)
* Rx FQ taildrop configuration as well. We configure taildrop
* only when pause frame generation is disabled.
*/
tx_pause = !!(state.options & DPNI_LINK_OPT_PAUSE) ^
!!(state.options & DPNI_LINK_OPT_ASYM_PAUSE);
dpaa2_eth_set_rx_taildrop(priv, !tx_pause);
tx_pause = dpaa2_eth_tx_pause_enabled(state.options);
dpaa2_eth_set_rx_taildrop(priv, tx_pause, priv->pfc_enabled);
/* When we manage the MAC/PHY using phylink there is no need
* to manually update the netif_carrier.
......@@ -2407,7 +2442,7 @@ static void set_fq_affinity(struct dpaa2_eth_priv *priv)
static void setup_fqs(struct dpaa2_eth_priv *priv)
{
int i;
int i, j;
/* We have one TxConf FQ per Tx flow.
* The number of Tx and Rx queues is the same.
......@@ -2419,10 +2454,13 @@ static void setup_fqs(struct dpaa2_eth_priv *priv)
priv->fq[priv->num_fqs++].flowid = (u16)i;
}
for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
priv->fq[priv->num_fqs++].flowid = (u16)i;
for (j = 0; j < dpaa2_eth_tc_count(priv); j++) {
for (i = 0; i < dpaa2_eth_queue_count(priv); i++) {
priv->fq[priv->num_fqs].type = DPAA2_RX_FQ;
priv->fq[priv->num_fqs].consume = dpaa2_eth_rx;
priv->fq[priv->num_fqs].tc = (u8)j;
priv->fq[priv->num_fqs++].flowid = (u16)i;
}
}
/* For each FQ, decide on which core to process incoming frames */
......@@ -2691,6 +2729,118 @@ static void update_tx_fqids(struct dpaa2_eth_priv *priv)
priv->enqueue = dpaa2_eth_enqueue_qd;
}
/* Configure ingress classification based on VLAN PCP.
 * Builds a one-extract key generation profile over the VLAN TCI field,
 * installs it as the QoS table, then adds one masked entry per traffic
 * class mapping VLAN PCP values to TCs (highest PCP -> highest TC).
 * Returns 0 on success, -EOPNOTSUPP when VLAN-based QoS cannot be used,
 * or a negative errno / firmware error code.
 */
static int set_vlan_qos(struct dpaa2_eth_priv *priv)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpkg_profile_cfg kg_cfg = {0};
struct dpni_qos_tbl_cfg qos_cfg = {0};
struct dpni_rule_cfg key_params;
void *dma_mem, *key, *mask;
u8 key_size = 2; /* VLAN TCI field */
int i, pcp, err;

/* VLAN-based classification only makes sense if we have multiple
 * traffic classes.
 * Also, we need to extract just the 3-bit PCP field from the VLAN
 * header and we can only do that by using a mask
 */
if (dpaa2_eth_tc_count(priv) == 1 || !dpaa2_eth_fs_mask_enabled(priv)) {
dev_dbg(dev, "VLAN-based QoS classification not supported\n");
return -EOPNOTSUPP;
}

/* Scratch buffer for the serialized key extraction profile */
dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL);
if (!dma_mem)
return -ENOMEM;

/* Single extraction: the full 16-bit VLAN TCI field */
kg_cfg.num_extracts = 1;
kg_cfg.extracts[0].type = DPKG_EXTRACT_FROM_HDR;
kg_cfg.extracts[0].extract.from_hdr.prot = NET_PROT_VLAN;
kg_cfg.extracts[0].extract.from_hdr.type = DPKG_FULL_FIELD;
kg_cfg.extracts[0].extract.from_hdr.field = NH_FLD_VLAN_TCI;

err = dpni_prepare_key_cfg(&kg_cfg, dma_mem);
if (err) {
dev_err(dev, "dpni_prepare_key_cfg failed\n");
goto out_free_tbl;
}

/* set QoS table */
qos_cfg.default_tc = 0;
qos_cfg.discard_on_miss = 0;
qos_cfg.key_cfg_iova = dma_map_single(dev, dma_mem,
DPAA2_CLASSIFIER_DMA_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, qos_cfg.key_cfg_iova)) {
dev_err(dev, "QoS table DMA mapping failed\n");
err = -ENOMEM;
goto out_free_tbl;
}

err = dpni_set_qos_table(priv->mc_io, 0, priv->mc_token, &qos_cfg);
if (err) {
dev_err(dev, "dpni_set_qos_table failed\n");
goto out_unmap_tbl;
}

/* Add QoS table entries */
/* One buffer holds both halves: key bytes followed by mask bytes */
key = kzalloc(key_size * 2, GFP_KERNEL);
if (!key) {
err = -ENOMEM;
goto out_unmap_tbl;
}
mask = key + key_size;
/* Match only the 3-bit PCP portion of the TCI */
*(__be16 *)mask = cpu_to_be16(VLAN_PRIO_MASK);

key_params.key_iova = dma_map_single(dev, key, key_size * 2,
DMA_TO_DEVICE);
if (dma_mapping_error(dev, key_params.key_iova)) {
dev_err(dev, "Qos table entry DMA mapping failed\n");
err = -ENOMEM;
goto out_free_key;
}

key_params.mask_iova = key_params.key_iova + key_size;
key_params.key_size = key_size;

/* We add rules for PCP-based distribution starting with highest
 * priority (VLAN PCP = 7). If this DPNI doesn't have enough traffic
 * classes to accommodate all priority levels, the lowest ones end up
 * on TC 0 which was configured as default
 */
for (i = dpaa2_eth_tc_count(priv) - 1, pcp = 7; i >= 0; i--, pcp--) {
*(__be16 *)key = cpu_to_be16(pcp << VLAN_PRIO_SHIFT);
/* The key buffer was rewritten on the CPU side; sync it back to
 * the device before the firmware reads it
 */
dma_sync_single_for_device(dev, key_params.key_iova,
key_size * 2, DMA_TO_DEVICE);

err = dpni_add_qos_entry(priv->mc_io, 0, priv->mc_token,
&key_params, i, i);
if (err) {
dev_err(dev, "dpni_add_qos_entry failed\n");
dpni_clear_qos_table(priv->mc_io, 0, priv->mc_token);
goto out_unmap_key;
}
}

priv->vlan_cls_enabled = true;

/* Table and key memory is not persistent, clean everything up after
 * configuration is finished
 */
out_unmap_key:
dma_unmap_single(dev, key_params.key_iova, key_size * 2, DMA_TO_DEVICE);
out_free_key:
kfree(key);
out_unmap_tbl:
dma_unmap_single(dev, qos_cfg.key_cfg_iova, DPAA2_CLASSIFIER_DMA_SIZE,
DMA_TO_DEVICE);
out_free_tbl:
kfree(dma_mem);

return err;
}
/* Configure the DPNI object this interface is associated with */
static int setup_dpni(struct fsl_mc_device *ls_dev)
{
......@@ -2753,6 +2903,10 @@ static int setup_dpni(struct fsl_mc_device *ls_dev)
goto close;
}
err = set_vlan_qos(priv);
if (err && err != -EOPNOTSUPP)
goto close;
priv->cls_rules = devm_kzalloc(dev, sizeof(struct dpaa2_eth_cls_rule) *
dpaa2_eth_fs_count(priv), GFP_KERNEL);
if (!priv->cls_rules) {
......@@ -2789,7 +2943,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
int err;
err = dpni_get_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_RX, 0, fq->flowid, &queue, &qid);
DPNI_QUEUE_RX, fq->tc, fq->flowid, &queue, &qid);
if (err) {
dev_err(dev, "dpni_get_queue(RX) failed\n");
return err;
......@@ -2802,7 +2956,7 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
queue.destination.priority = 1;
queue.user_context = (u64)(uintptr_t)fq;
err = dpni_set_queue(priv->mc_io, 0, priv->mc_token,
DPNI_QUEUE_RX, 0, fq->flowid,
DPNI_QUEUE_RX, fq->tc, fq->flowid,
DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST,
&queue);
if (err) {
......@@ -2811,6 +2965,10 @@ static int setup_rx_flow(struct dpaa2_eth_priv *priv,
}
/* xdp_rxq setup */
/* only once for each channel */
if (fq->tc > 0)
return 0;
err = xdp_rxq_info_reg(&fq->channel->xdp_rxq, priv->net_dev,
fq->flowid);
if (err) {
......@@ -2948,7 +3106,7 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_tc_dist_cfg dist_cfg;
int err;
int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
......@@ -2956,9 +3114,14 @@ static int config_legacy_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.dist_mode = DPNI_DIST_MODE_HASH;
err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg);
if (err)
dev_err(dev, "dpni_set_rx_tc_dist failed\n");
for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token,
i, &dist_cfg);
if (err) {
dev_err(dev, "dpni_set_rx_tc_dist failed\n");
break;
}
}
return err;
}
......@@ -2968,7 +3131,7 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
int err;
int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
......@@ -2976,9 +3139,15 @@ static int config_hash_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.enable = 1;
err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
if (err)
dev_err(dev, "dpni_set_rx_hash_dist failed\n");
for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
dist_cfg.tc = i;
err = dpni_set_rx_hash_dist(priv->mc_io, 0, priv->mc_token,
&dist_cfg);
if (err) {
dev_err(dev, "dpni_set_rx_hash_dist failed\n");
break;
}
}
return err;
}
......@@ -2988,7 +3157,7 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
{
struct device *dev = priv->net_dev->dev.parent;
struct dpni_rx_dist_cfg dist_cfg;
int err;
int i, err = 0;
memset(&dist_cfg, 0, sizeof(dist_cfg));
......@@ -2996,9 +3165,15 @@ static int config_cls_key(struct dpaa2_eth_priv *priv, dma_addr_t key)
dist_cfg.dist_size = dpaa2_eth_queue_count(priv);
dist_cfg.enable = 1;
err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token, &dist_cfg);
if (err)
dev_err(dev, "dpni_set_rx_fs_dist failed\n");
for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
dist_cfg.tc = i;
err = dpni_set_rx_fs_dist(priv->mc_io, 0, priv->mc_token,
&dist_cfg);
if (err) {
dev_err(dev, "dpni_set_rx_fs_dist failed\n");
break;
}
}
return err;
}
......@@ -3684,6 +3859,15 @@ static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev)
if (err)
goto err_alloc_rings;
#ifdef CONFIG_FSL_DPAA2_ETH_DCB
if (dpaa2_eth_has_pause_support(priv) && priv->vlan_cls_enabled) {
priv->dcbx_mode = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
net_dev->dcbnl_ops = &dpaa2_eth_dcbnl_ops;
} else {
dev_dbg(dev, "PFC not supported\n");
}
#endif
err = setup_irqs(dpni_dev);
if (err) {
netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n");
......
......@@ -6,6 +6,7 @@
#ifndef __DPAA2_ETH_H
#define __DPAA2_ETH_H
#include <linux/dcbnl.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/fsl/mc.h>
......@@ -36,27 +37,46 @@
/* Convert L3 MTU to L2 MFL */
#define DPAA2_ETH_L2_MAX_FRM(mtu) ((mtu) + VLAN_ETH_HLEN)
/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo
* frames in the Rx queues (length of the current frame is not
* taken into account when making the taildrop decision)
/* Set the taildrop threshold (in bytes) to allow the enqueue of a large
* enough number of jumbo frames in the Rx queues (length of the current
* frame is not taken into account when making the taildrop decision)
*/
#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024)
#define DPAA2_ETH_FQ_TAILDROP_THRESH (1024 * 1024)
/* Maximum number of Tx confirmation frames to be processed
* in a single NAPI call
*/
#define DPAA2_ETH_TXCONF_PER_NAPI 256
/* Buffer quota per queue. Must be large enough such that for minimum sized
* frames taildrop kicks in before the bpool gets depleted, so we compute
* how many 64B frames fit inside the taildrop threshold and add a margin
* to accommodate the buffer refill delay.
/* Buffer quota per channel. We want to keep in check the number of ingress frames
* in flight: for small sized frames, congestion group taildrop may kick in
* first; for large sizes, Rx FQ taildrop threshold will ensure only a
* reasonable number of frames will be pending at any given time.
* Ingress frame drop due to buffer pool depletion should be a corner case only
*/
#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64)
#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256)
#define DPAA2_ETH_NUM_BUFS 1280
#define DPAA2_ETH_REFILL_THRESH \
(DPAA2_ETH_NUM_BUFS - DPAA2_ETH_BUFS_PER_CMD)
/* Congestion group taildrop threshold: number of frames allowed to accumulate
* at any moment in a group of Rx queues belonging to the same traffic class.
* Choose value such that we don't risk depleting the buffer pool before the
* taildrop kicks in
*/
#define DPAA2_ETH_CG_TAILDROP_THRESH(priv) \
(1024 * dpaa2_eth_queue_count(priv) / dpaa2_eth_tc_count(priv))
/* Congestion group notification threshold: when this many frames accumulate
* on the Rx queues belonging to the same TC, the MAC is instructed to send
* PFC frames for that TC.
* When number of pending frames drops below exit threshold transmission of
* PFC frames is stopped.
*/
#define DPAA2_ETH_CN_THRESH_ENTRY(priv) \
(DPAA2_ETH_CG_TAILDROP_THRESH(priv) / 2)
#define DPAA2_ETH_CN_THRESH_EXIT(priv) \
(DPAA2_ETH_CN_THRESH_ENTRY(priv) * 3 / 4)
/* Maximum number of buffers that can be acquired/released through a single
* QBMan command
*/
......@@ -294,7 +314,9 @@ struct dpaa2_eth_ch_stats {
/* Maximum number of queues associated with a DPNI */
#define DPAA2_ETH_MAX_TCS 8
#define DPAA2_ETH_MAX_RX_QUEUES 16
#define DPAA2_ETH_MAX_RX_QUEUES_PER_TC 16
#define DPAA2_ETH_MAX_RX_QUEUES \
(DPAA2_ETH_MAX_RX_QUEUES_PER_TC * DPAA2_ETH_MAX_TCS)
#define DPAA2_ETH_MAX_TX_QUEUES 16
#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \
DPAA2_ETH_MAX_TX_QUEUES)
......@@ -414,7 +436,8 @@ struct dpaa2_eth_priv {
struct dpaa2_eth_drv_stats __percpu *percpu_extras;
u16 mc_token;
u8 rx_td_enabled;
u8 rx_fqtd_enabled;
u8 rx_cgtd_enabled;
struct dpni_link_state link_state;
bool do_link_poll;
......@@ -425,6 +448,12 @@ struct dpaa2_eth_priv {
u64 rx_cls_fields;
struct dpaa2_eth_cls_rule *cls_rules;
u8 rx_cls_enabled;
u8 vlan_cls_enabled;
u8 pfc_enabled;
#ifdef CONFIG_FSL_DPAA2_ETH_DCB
u8 dcbx_mode;
struct ieee_pfc pfc;
#endif
struct bpf_prog *xdp_prog;
#ifdef CONFIG_DEBUG_FS
struct dpaa2_debugfs dbg;
......@@ -507,6 +536,17 @@ enum dpaa2_eth_rx_dist {
(dpaa2_eth_cmp_dpni_ver((priv), DPNI_PAUSE_VER_MAJOR, \
DPNI_PAUSE_VER_MINOR) >= 0)
static inline bool dpaa2_eth_tx_pause_enabled(u64 link_options)
{
return !!(link_options & DPNI_LINK_OPT_PAUSE) ^
!!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
}
static inline bool dpaa2_eth_rx_pause_enabled(u64 link_options)
{
return !!(link_options & DPNI_LINK_OPT_PAUSE);
}
static inline
unsigned int dpaa2_eth_needed_headroom(struct dpaa2_eth_priv *priv,
struct sk_buff *skb)
......@@ -546,4 +586,9 @@ int dpaa2_eth_cls_key_size(u64 key);
int dpaa2_eth_cls_fld_off(int prot, int field);
void dpaa2_eth_cls_trim_rule(void *key_mem, u64 fields);
void dpaa2_eth_set_rx_taildrop(struct dpaa2_eth_priv *priv,
bool tx_pause, bool pfc);
extern const struct dcbnl_rtnl_ops dpaa2_eth_dcbnl_ops;
#endif /* __DPAA2_H */
......@@ -130,9 +130,8 @@ static void dpaa2_eth_get_pauseparam(struct net_device *net_dev,
return;
}
pause->rx_pause = !!(link_options & DPNI_LINK_OPT_PAUSE);
pause->tx_pause = pause->rx_pause ^
!!(link_options & DPNI_LINK_OPT_ASYM_PAUSE);
pause->rx_pause = dpaa2_eth_rx_pause_enabled(link_options);
pause->tx_pause = dpaa2_eth_tx_pause_enabled(link_options);
pause->autoneg = AUTONEG_DISABLE;
}
......@@ -547,7 +546,7 @@ static int do_cls_rule(struct net_device *net_dev,
dma_addr_t key_iova;
u64 fields = 0;
void *key_buf;
int err;
int i, err;
if (fs->ring_cookie != RX_CLS_FLOW_DISC &&
fs->ring_cookie >= dpaa2_eth_queue_count(priv))
......@@ -607,11 +606,18 @@ static int do_cls_rule(struct net_device *net_dev,
fs_act.options |= DPNI_FS_OPT_DISCARD;
else
fs_act.flow_id = fs->ring_cookie;
err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
fs->location, &rule_cfg, &fs_act);
} else {
err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0,
&rule_cfg);
}
for (i = 0; i < dpaa2_eth_tc_count(priv); i++) {
if (add)
err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token,
i, fs->location, &rule_cfg,
&fs_act);
else
err = dpni_remove_fs_entry(priv->mc_io, 0,
priv->mc_token, i,
&rule_cfg);
if (err)
break;
}
dma_unmap_single(dev, key_iova, rule_cfg.key_size * 2, DMA_TO_DEVICE);
......
......@@ -59,6 +59,10 @@
#define DPNI_CMDID_SET_RX_TC_DIST DPNI_CMD(0x235)
#define DPNI_CMDID_SET_QOS_TBL DPNI_CMD(0x240)
#define DPNI_CMDID_ADD_QOS_ENT DPNI_CMD(0x241)
#define DPNI_CMDID_REMOVE_QOS_ENT DPNI_CMD(0x242)
#define DPNI_CMDID_CLR_QOS_TBL DPNI_CMD(0x243)
#define DPNI_CMDID_ADD_FS_ENT DPNI_CMD(0x244)
#define DPNI_CMDID_REMOVE_FS_ENT DPNI_CMD(0x245)
#define DPNI_CMDID_CLR_FS_ENT DPNI_CMD(0x246)
......@@ -567,4 +571,59 @@ struct dpni_cmd_remove_fs_entry {
__le64 mask_iova;
};
#define DPNI_DISCARD_ON_MISS_SHIFT 0
#define DPNI_DISCARD_ON_MISS_SIZE 1
/* Payload of DPNI_CMDID_SET_QOS_TBL. Field sizes and order mirror the MC
 * firmware command layout (assumed from surrounding command structs) —
 * do not reorder fields.
 */
struct dpni_cmd_set_qos_table {
__le32 pad;
u8 default_tc;
/* only the LSB */
u8 discard_on_miss;
__le16 pad1[21];
__le64 key_cfg_iova;
};
/* Payload of DPNI_CMDID_ADD_QOS_ENT: masked key rule plus the target TC
 * and table index. Layout is firmware-defined — do not reorder fields.
 */
struct dpni_cmd_add_qos_entry {
__le16 pad;
u8 tc_id;
u8 key_size;
__le16 index;
__le16 pad1;
__le64 key_iova;
__le64 mask_iova;
};
/* Payload of DPNI_CMDID_REMOVE_QOS_ENT: identifies the entry to delete by
 * its key/mask pair. Layout is firmware-defined — do not reorder fields.
 */
struct dpni_cmd_remove_qos_entry {
u8 pad[3];
u8 key_size;
__le32 pad1;
__le64 key_iova;
__le64 mask_iova;
};
#define DPNI_DEST_TYPE_SHIFT 0
#define DPNI_DEST_TYPE_SIZE 4
#define DPNI_CONG_UNITS_SHIFT 4
#define DPNI_CONG_UNITS_SIZE 2
/* Payload of DPNI_CMDID_SET_CONGESTION_NOTIFICATION, serialized in 64-bit
 * command words. Layout is firmware-defined — do not reorder fields.
 */
struct dpni_cmd_set_congestion_notification {
/* cmd word 0 */
u8 qtype;
u8 tc;
u8 pad[6];
/* cmd word 1 */
__le32 dest_id;
__le16 notification_mode;
u8 dest_priority;
/* from LSB: dest_type: 4 units:2 */
u8 type_units;
/* cmd word 2 */
__le64 message_iova;
/* cmd word 3 */
__le64 message_ctx;
/* cmd word 4 */
__le32 threshold_entry;
__le32 threshold_exit;
};
#endif /* _FSL_DPNI_CMD_H */
......@@ -1354,6 +1354,52 @@ int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io,
return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_set_congestion_notification() - Set traffic class congestion
 *	notification configuration
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @qtype:	Type of queue - Rx, Tx and Tx confirm types are supported
 * @tc_id:	Traffic class selection (0-7)
 * @cfg:	Congestion notification configuration
 *
 * Return: '0' on Success; error code otherwise.
 */
int dpni_set_congestion_notification(
struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
enum dpni_queue_type qtype,
u8 tc_id,
const struct dpni_congestion_notification_cfg *cfg)
{
struct dpni_cmd_set_congestion_notification *cmd_params;
struct fsl_mc_command cmd = { 0 };

/* prepare command */
cmd.header =
mc_encode_cmd_header(DPNI_CMDID_SET_CONGESTION_NOTIFICATION,
cmd_flags,
token);
cmd_params = (struct dpni_cmd_set_congestion_notification *)cmd.params;
cmd_params->qtype = qtype;
cmd_params->tc = tc_id;
/* Multi-byte fields are serialized little-endian for the firmware */
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
cmd_params->notification_mode = cpu_to_le16(cfg->notification_mode);
cmd_params->dest_priority = cfg->dest_cfg.priority;
/* dest_type (4 bits) and units (2 bits) share the type_units byte */
dpni_set_field(cmd_params->type_units, DEST_TYPE,
cfg->dest_cfg.dest_type);
dpni_set_field(cmd_params->type_units, CONG_UNITS, cfg->units);
cmd_params->message_iova = cpu_to_le64(cfg->message_iova);
cmd_params->message_ctx = cpu_to_le64(cfg->message_ctx);
cmd_params->threshold_entry = cpu_to_le32(cfg->threshold_entry);
cmd_params->threshold_exit = cpu_to_le32(cfg->threshold_exit);

/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
* dpni_set_queue() - Set queue parameters
* @mc_io: Pointer to MC portal's I/O object
......@@ -1786,3 +1832,134 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_set_qos_table() - Set QoS mapping table
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @cfg:	QoS table configuration
 *
 * This function and all QoS-related functions require that
 * 'max_tcs > 1' was set at DPNI creation.
 *
 * warning: Before calling this function, call dpkg_prepare_key_cfg() to
 * prepare the key_cfg_iova parameter
 *
 * Return: '0' on Success; Error code otherwise.
 */
int dpni_set_qos_table(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_qos_tbl_cfg *cfg)
{
struct dpni_cmd_set_qos_table *cmd_params;
struct fsl_mc_command cmd = { 0 };

/* prepare command */
cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL,
cmd_flags,
token);
cmd_params = (struct dpni_cmd_set_qos_table *)cmd.params;
cmd_params->default_tc = cfg->default_tc;
cmd_params->key_cfg_iova = cpu_to_le64(cfg->key_cfg_iova);
/* discard_on_miss occupies only the LSB of its command byte */
dpni_set_field(cmd_params->discard_on_miss, DISCARD_ON_MISS,
cfg->discard_on_miss);

/* send command to mc*/
return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class)
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @cfg:	QoS rule to add
 * @tc_id:	Traffic class selection (0-7)
 * @index:	Location in the QoS table where to insert the entry.
 *		Only relevant if MASKING is enabled for QoS classification
 *		on this DPNI, it is ignored for exact match.
 *
 * Return: '0' on Success; Error code otherwise.
 */
int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
		       u32 cmd_flags,
		       u16 token,
		       const struct dpni_rule_cfg *cfg,
		       u8 tc_id,
		       u16 index)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_add_qos_entry *cmd_params;

	/* Build the ADD_QOS_ENT command: encode the header, then fill in
	 * the rule description (key/mask IOVAs, size, index, target TC)
	 */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, cmd_flags,
					  token);
	cmd_params = (struct dpni_cmd_add_qos_entry *)cmd.params;
	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
	cmd_params->key_size = cfg->key_size;
	cmd_params->index = cpu_to_le16(index);
	cmd_params->tc_id = tc_id;

	/* Hand the command off to the management complex */
	return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_remove_qos_entry() - Remove QoS mapping entry
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 * @cfg:	QoS rule to remove
 *
 * Return: '0' on Success; Error code otherwise.
 */
int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
			  u32 cmd_flags,
			  u16 token,
			  const struct dpni_rule_cfg *cfg)
{
	struct fsl_mc_command cmd = { 0 };
	struct dpni_cmd_remove_qos_entry *cmd_params;

	/* Build the REMOVE_QOS_ENT command: the entry to delete is
	 * identified by its key/mask pair and key size
	 */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, cmd_flags,
					  token);
	cmd_params = (struct dpni_cmd_remove_qos_entry *)cmd.params;
	cmd_params->key_iova = cpu_to_le64(cfg->key_iova);
	cmd_params->mask_iova = cpu_to_le64(cfg->mask_iova);
	cmd_params->key_size = cfg->key_size;

	/* Hand the command off to the management complex */
	return mc_send_command(mc_io, &cmd);
}
/**
 * dpni_clear_qos_table() - Clear all QoS mapping entries
 * @mc_io:	Pointer to MC portal's I/O object
 * @cmd_flags:	Command flags; one or more of 'MC_CMD_FLAG_'
 * @token:	Token of DPNI object
 *
 * Following this function call, all frames are directed to
 * the default traffic class (0)
 *
 * Return: '0' on Success; Error code otherwise.
 */
int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
			 u32 cmd_flags,
			 u16 token)
{
	struct fsl_mc_command cmd = { 0 };

	/* This command carries no payload — only the encoded header */
	cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, cmd_flags,
					  token);

	return mc_send_command(mc_io, &cmd);
}
......@@ -513,6 +513,11 @@ int dpni_get_statistics(struct fsl_mc_io *mc_io,
*/
#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL
/**
* Enable priority flow control pause frames
*/
#define DPNI_LINK_OPT_PFC_PAUSE 0x0000000000000010ULL
/**
* struct - Structure representing DPNI link configuration
* @rate: Rate
......@@ -715,6 +720,26 @@ int dpni_set_rx_hash_dist(struct fsl_mc_io *mc_io,
u16 token,
const struct dpni_rx_dist_cfg *cfg);
/**
 * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration
 * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with
 *		key extractions to be used as the QoS criteria by calling
 *		dpkg_prepare_key_cfg()
 * @discard_on_miss: Set to '1' to discard frames in case of no match (miss);
 *		'0' to use the 'default_tc' in such cases
 * @default_tc: Used in case of no-match and 'discard_on_miss'= 0
 *
 * Passed to dpni_set_qos_table(); the key_cfg_iova buffer only needs to
 * stay mapped for the duration of that call.
 */
struct dpni_qos_tbl_cfg {
u64 key_cfg_iova;
int discard_on_miss;
u8 default_tc;
};

int dpni_set_qos_table(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_qos_tbl_cfg *cfg);
/**
* enum dpni_dest - DPNI destination types
* @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and
......@@ -857,6 +882,62 @@ enum dpni_congestion_point {
DPNI_CP_GROUP,
};
/**
 * struct dpni_dest_cfg - Structure representing DPNI destination parameters
 * @dest_type: Destination type
 * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type
 * @priority: Priority selection within the DPIO or DPCON channel; valid
 *	values are 0-1 or 0-7, depending on the number of priorities
 *	in that channel; not relevant for 'DPNI_DEST_NONE' option
 *
 * Embedded in struct dpni_congestion_notification_cfg to describe where
 * congestion state change notifications are delivered.
 */
struct dpni_dest_cfg {
enum dpni_dest dest_type;
int dest_id;
u8 priority;
};
/* DPNI congestion options */
/**
* This congestion will trigger flow control or priority flow control.
* This will have effect only if flow control is enabled with
* dpni_set_link_cfg().
*/
#define DPNI_CONG_OPT_FLOW_CONTROL 0x00000040
/**
 * struct dpni_congestion_notification_cfg - congestion notification
 *	configuration
 * @units: Units type
 * @threshold_entry: Above this threshold we enter a congestion state.
 *	set it to '0' to disable it
 * @threshold_exit: Below this threshold we exit the congestion state.
 *	Should be lower than @threshold_entry (hysteresis) — see callers
 * @message_ctx: The context that will be part of the CSCN message
 * @message_iova: I/O virtual address (must be in DMA-able memory),
 *	must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_<X>'
 *	is contained in 'options'
 * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel
 * @notification_mode: Mask of available options; use 'DPNI_CONG_OPT_<X>' values
 */
struct dpni_congestion_notification_cfg {
enum dpni_congestion_unit units;
u32 threshold_entry;
u32 threshold_exit;
u64 message_ctx;
u64 message_iova;
struct dpni_dest_cfg dest_cfg;
u16 notification_mode;
};
int dpni_set_congestion_notification(
struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
enum dpni_queue_type qtype,
u8 tc_id,
const struct dpni_congestion_notification_cfg *cfg);
/**
* struct dpni_taildrop - Structure representing the taildrop
* @enable: Indicates whether the taildrop is active or not.
......@@ -961,6 +1042,22 @@ int dpni_remove_fs_entry(struct fsl_mc_io *mc_io,
u8 tc_id,
const struct dpni_rule_cfg *cfg);
int dpni_add_qos_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_rule_cfg *cfg,
u8 tc_id,
u16 index);
int dpni_remove_qos_entry(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token,
const struct dpni_rule_cfg *cfg);
int dpni_clear_qos_table(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 token);
int dpni_get_api_version(struct fsl_mc_io *mc_io,
u32 cmd_flags,
u16 *major_ver,
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册