Commit 30129cf2 authored by David S. Miller

Merge branch 'for-davem' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc-next

Ben Hutchings says:

====================
1. Merge sfc changes (only) accepted for 3.9.

2. PTP improvements from Laurence Evans.

3. Overhaul of RX buffer management:
- Always allocate pages, and enable scattering where possible
- Fit as many buffers as will fit into a page, rather than limiting to 2
- Introduce recycle rings to reduce the need for IOMMU mapping and
  unmapping

4. PCI error recovery (AER and EEH) implementation.

5. Fix a bug in RX filter replacement.

6. Fix configuration with 1 RX queue in the PF and multiple RX queues in
VFs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
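
As a rough illustration of item 3 above: a recycle ring is a small per-RX-queue ring of pages that stay DMA-mapped between uses, so the refill path can reuse a page the stack has already released instead of calling alloc_pages()/dma_map_page() again (the new @page_ring, @page_add and @page_remove fields in the net_driver.h hunk further down hold this state). The sketch below is illustrative only, not the driver's implementation, and every name in it is made up:

    /* Minimal sketch of a page recycle ring (illustrative names only) */
    struct example_recycle_ring {
    	struct page **pages;	/* ring of still-DMA-mapped pages */
    	unsigned int add;	/* write counter */
    	unsigned int remove;	/* read counter */
    	unsigned int mask;	/* ring size - 1; size is a power of two */
    };

    /* Called when the stack has finished with a receive buffer's page */
    static void example_recycle_put(struct example_recycle_ring *ring,
    				struct page *page)
    {
    	unsigned int index = ring->add & ring->mask;

    	/* Can only recycle a page if we hold the final reference to it */
    	if (page_count(page) != 1) {
    		put_page(page);
    		return;
    	}

    	if (!ring->pages[index]) {
    		/* Keep the page (and its DMA mapping) for the next refill */
    		ring->pages[index] = page;
    		++ring->add;
    	} else {
    		/* Ring full: drop our reference; a real driver would also
    		 * dma_unmap_page() the page first.
    		 */
    		put_page(page);
    	}
    }

    /* Called on refill; returns NULL if the caller must allocate a new page */
    static struct page *example_recycle_get(struct example_recycle_ring *ring)
    {
    	unsigned int index = ring->remove & ring->mask;
    	struct page *page = ring->pages[index];

    	if (page) {
    		ring->pages[index] = NULL;
    		++ring->remove;
    	}
    	return page;
    }
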
...@@ -21,7 +21,9 @@ ...@@ -21,7 +21,9 @@
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/topology.h> #include <linux/topology.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/pci.h>
#include <linux/cpu_rmap.h> #include <linux/cpu_rmap.h>
#include <linux/aer.h>
#include "net_driver.h" #include "net_driver.h"
#include "efx.h" #include "efx.h"
#include "nic.h" #include "nic.h"
...@@ -71,21 +73,21 @@ const char *const efx_loopback_mode_names[] = { ...@@ -71,21 +73,21 @@ const char *const efx_loopback_mode_names[] = {
const unsigned int efx_reset_type_max = RESET_TYPE_MAX; const unsigned int efx_reset_type_max = RESET_TYPE_MAX;
const char *const efx_reset_type_names[] = { const char *const efx_reset_type_names[] = {
[RESET_TYPE_INVISIBLE] = "INVISIBLE", [RESET_TYPE_INVISIBLE] = "INVISIBLE",
[RESET_TYPE_ALL] = "ALL", [RESET_TYPE_ALL] = "ALL",
[RESET_TYPE_WORLD] = "WORLD", [RESET_TYPE_RECOVER_OR_ALL] = "RECOVER_OR_ALL",
[RESET_TYPE_DISABLE] = "DISABLE", [RESET_TYPE_WORLD] = "WORLD",
[RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG", [RESET_TYPE_RECOVER_OR_DISABLE] = "RECOVER_OR_DISABLE",
[RESET_TYPE_INT_ERROR] = "INT_ERROR", [RESET_TYPE_DISABLE] = "DISABLE",
[RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY", [RESET_TYPE_TX_WATCHDOG] = "TX_WATCHDOG",
[RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH", [RESET_TYPE_INT_ERROR] = "INT_ERROR",
[RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH", [RESET_TYPE_RX_RECOVERY] = "RX_RECOVERY",
[RESET_TYPE_TX_SKIP] = "TX_SKIP", [RESET_TYPE_RX_DESC_FETCH] = "RX_DESC_FETCH",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE", [RESET_TYPE_TX_DESC_FETCH] = "TX_DESC_FETCH",
[RESET_TYPE_TX_SKIP] = "TX_SKIP",
[RESET_TYPE_MC_FAILURE] = "MC_FAILURE",
}; };
#define EFX_MAX_MTU (9 * 1024)
/* Reset workqueue. If any NIC has a hardware failure then a reset will be /* Reset workqueue. If any NIC has a hardware failure then a reset will be
* queued onto this work queue. This is not a per-nic work queue, because * queued onto this work queue. This is not a per-nic work queue, because
* efx_reset_work() acquires the rtnl lock, so resets are naturally serialised. * efx_reset_work() acquires the rtnl lock, so resets are naturally serialised.
...@@ -117,9 +119,12 @@ MODULE_PARM_DESC(separate_tx_channels, ...@@ -117,9 +119,12 @@ MODULE_PARM_DESC(separate_tx_channels,
static int napi_weight = 64; static int napi_weight = 64;
/* This is the time (in jiffies) between invocations of the hardware /* This is the time (in jiffies) between invocations of the hardware
* monitor. On Falcon-based NICs, this will: * monitor.
* On Falcon-based NICs, this will:
* - Check the on-board hardware monitor; * - Check the on-board hardware monitor;
* - Poll the link state and reconfigure the hardware as necessary. * - Poll the link state and reconfigure the hardware as necessary.
* On Siena-based NICs for power systems with EEH support, this will give EEH a
* chance to start.
*/ */
static unsigned int efx_monitor_interval = 1 * HZ; static unsigned int efx_monitor_interval = 1 * HZ;
...@@ -203,13 +208,14 @@ static void efx_stop_all(struct efx_nic *efx); ...@@ -203,13 +208,14 @@ static void efx_stop_all(struct efx_nic *efx);
#define EFX_ASSERT_RESET_SERIALISED(efx) \ #define EFX_ASSERT_RESET_SERIALISED(efx) \
do { \ do { \
if ((efx->state == STATE_READY) || \ if ((efx->state == STATE_READY) || \
(efx->state == STATE_RECOVERY) || \
(efx->state == STATE_DISABLED)) \ (efx->state == STATE_DISABLED)) \
ASSERT_RTNL(); \ ASSERT_RTNL(); \
} while (0) } while (0)
static int efx_check_disabled(struct efx_nic *efx) static int efx_check_disabled(struct efx_nic *efx)
{ {
if (efx->state == STATE_DISABLED) { if (efx->state == STATE_DISABLED || efx->state == STATE_RECOVERY) {
netif_err(efx, drv, efx->net_dev, netif_err(efx, drv, efx->net_dev,
"device is disabled due to earlier errors\n"); "device is disabled due to earlier errors\n");
return -EIO; return -EIO;
...@@ -242,15 +248,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget) ...@@ -242,15 +248,9 @@ static int efx_process_channel(struct efx_channel *channel, int budget)
struct efx_rx_queue *rx_queue = struct efx_rx_queue *rx_queue =
efx_channel_get_rx_queue(channel); efx_channel_get_rx_queue(channel);
/* Deliver last RX packet. */ efx_rx_flush_packet(channel);
if (channel->rx_pkt) { if (rx_queue->enabled)
__efx_rx_packet(channel, channel->rx_pkt);
channel->rx_pkt = NULL;
}
if (rx_queue->enabled) {
efx_rx_strategy(channel);
efx_fast_push_rx_descriptors(rx_queue); efx_fast_push_rx_descriptors(rx_queue);
}
} }
return spent; return spent;
...@@ -625,20 +625,51 @@ static int efx_probe_channels(struct efx_nic *efx) ...@@ -625,20 +625,51 @@ static int efx_probe_channels(struct efx_nic *efx)
*/ */
static void efx_start_datapath(struct efx_nic *efx) static void efx_start_datapath(struct efx_nic *efx)
{ {
bool old_rx_scatter = efx->rx_scatter;
struct efx_tx_queue *tx_queue; struct efx_tx_queue *tx_queue;
struct efx_rx_queue *rx_queue; struct efx_rx_queue *rx_queue;
struct efx_channel *channel; struct efx_channel *channel;
size_t rx_buf_len;
/* Calculate the rx buffer allocation parameters required to /* Calculate the rx buffer allocation parameters required to
* support the current MTU, including padding for header * support the current MTU, including padding for header
* alignment and overruns. * alignment and overruns.
*/ */
efx->rx_buffer_len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + efx->rx_dma_len = (efx->type->rx_buffer_hash_size +
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_hash_size + efx->type->rx_buffer_padding);
efx->type->rx_buffer_padding); rx_buf_len = (sizeof(struct efx_rx_page_state) +
efx->rx_buffer_order = get_order(efx->rx_buffer_len + EFX_PAGE_IP_ALIGN + efx->rx_dma_len);
sizeof(struct efx_rx_page_state)); if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = false;
efx->rx_buffer_order = 0;
} else if (efx->type->can_rx_scatter) {
BUILD_BUG_ON(sizeof(struct efx_rx_page_state) +
EFX_PAGE_IP_ALIGN + EFX_RX_USR_BUF_SIZE >
PAGE_SIZE / 2);
efx->rx_scatter = true;
efx->rx_dma_len = EFX_RX_USR_BUF_SIZE;
efx->rx_buffer_order = 0;
} else {
efx->rx_scatter = false;
efx->rx_buffer_order = get_order(rx_buf_len);
}
efx_rx_config_page_split(efx);
if (efx->rx_buffer_order)
netif_dbg(efx, drv, efx->net_dev,
"RX buf len=%u; page order=%u batch=%u\n",
efx->rx_dma_len, efx->rx_buffer_order,
efx->rx_pages_per_batch);
else
netif_dbg(efx, drv, efx->net_dev,
"RX buf len=%u step=%u bpp=%u; page batch=%u\n",
efx->rx_dma_len, efx->rx_page_buf_step,
efx->rx_bufs_per_page, efx->rx_pages_per_batch);
/* RX filters also have scatter-enabled flags */
if (efx->rx_scatter != old_rx_scatter)
efx_filter_update_rx_scatter(efx);
/* We must keep at least one descriptor in a TX ring empty. /* We must keep at least one descriptor in a TX ring empty.
* We could avoid this when the queue size does not exactly * We could avoid this when the queue size does not exactly
...@@ -655,16 +686,12 @@ static void efx_start_datapath(struct efx_nic *efx) ...@@ -655,16 +686,12 @@ static void efx_start_datapath(struct efx_nic *efx)
efx_for_each_channel_tx_queue(tx_queue, channel) efx_for_each_channel_tx_queue(tx_queue, channel)
efx_init_tx_queue(tx_queue); efx_init_tx_queue(tx_queue);
/* The rx buffer allocation strategy is MTU dependent */
efx_rx_strategy(channel);
efx_for_each_channel_rx_queue(rx_queue, channel) { efx_for_each_channel_rx_queue(rx_queue, channel) {
efx_init_rx_queue(rx_queue); efx_init_rx_queue(rx_queue);
efx_nic_generate_fill_event(rx_queue); efx_nic_generate_fill_event(rx_queue);
} }
WARN_ON(channel->rx_pkt != NULL); WARN_ON(channel->rx_pkt_n_frags);
efx_rx_strategy(channel);
} }
if (netif_device_present(efx->net_dev)) if (netif_device_present(efx->net_dev))
...@@ -683,7 +710,7 @@ static void efx_stop_datapath(struct efx_nic *efx) ...@@ -683,7 +710,7 @@ static void efx_stop_datapath(struct efx_nic *efx)
BUG_ON(efx->port_enabled); BUG_ON(efx->port_enabled);
/* Only perform flush if dma is enabled */ /* Only perform flush if dma is enabled */
if (dev->is_busmaster) { if (dev->is_busmaster && efx->state != STATE_RECOVERY) {
rc = efx_nic_flush_queues(efx); rc = efx_nic_flush_queues(efx);
if (rc && EFX_WORKAROUND_7803(efx)) { if (rc && EFX_WORKAROUND_7803(efx)) {
...@@ -1596,13 +1623,15 @@ static void efx_start_all(struct efx_nic *efx) ...@@ -1596,13 +1623,15 @@ static void efx_start_all(struct efx_nic *efx)
efx_start_port(efx); efx_start_port(efx);
efx_start_datapath(efx); efx_start_datapath(efx);
/* Start the hardware monitor if there is one. Otherwise (we're link /* Start the hardware monitor if there is one */
* event driven), we have to poll the PHY because after an event queue if (efx->type->monitor != NULL)
* flush, we could have a missed a link state change */
if (efx->type->monitor != NULL) {
queue_delayed_work(efx->workqueue, &efx->monitor_work, queue_delayed_work(efx->workqueue, &efx->monitor_work,
efx_monitor_interval); efx_monitor_interval);
} else {
/* If link state detection is normally event-driven, we have
* to poll now because we could have missed a change
*/
if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
mutex_lock(&efx->mac_lock); mutex_lock(&efx->mac_lock);
if (efx->phy_op->poll(efx)) if (efx->phy_op->poll(efx))
efx_link_status_changed(efx); efx_link_status_changed(efx);
...@@ -2309,7 +2338,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) ...@@ -2309,7 +2338,9 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
out: out:
/* Leave device stopped if necessary */ /* Leave device stopped if necessary */
disabled = rc || method == RESET_TYPE_DISABLE; disabled = rc ||
method == RESET_TYPE_DISABLE ||
method == RESET_TYPE_RECOVER_OR_DISABLE;
rc2 = efx_reset_up(efx, method, !disabled); rc2 = efx_reset_up(efx, method, !disabled);
if (rc2) { if (rc2) {
disabled = true; disabled = true;
...@@ -2328,13 +2359,48 @@ int efx_reset(struct efx_nic *efx, enum reset_type method) ...@@ -2328,13 +2359,48 @@ int efx_reset(struct efx_nic *efx, enum reset_type method)
return rc; return rc;
} }
/* Try recovery mechanisms.
* For now only EEH is supported.
* Returns 0 if the recovery mechanisms are unsuccessful.
* Returns a non-zero value otherwise.
*/
static int efx_try_recovery(struct efx_nic *efx)
{
#ifdef CONFIG_EEH
/* A PCI error can occur and not be seen by EEH because nothing
* happens on the PCI bus. In this case the driver may fail and
* schedule a 'recover or reset', leading to this recovery handler.
* Manually call the eeh failure check function.
*/
struct eeh_dev *eehdev =
of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
if (eeh_dev_check_failure(eehdev)) {
/* The EEH mechanisms will handle the error and reset the
* device if necessary.
*/
return 1;
}
#endif
return 0;
}
/* The worker thread exists so that code that cannot sleep can /* The worker thread exists so that code that cannot sleep can
* schedule a reset for later. * schedule a reset for later.
*/ */
static void efx_reset_work(struct work_struct *data) static void efx_reset_work(struct work_struct *data)
{ {
struct efx_nic *efx = container_of(data, struct efx_nic, reset_work); struct efx_nic *efx = container_of(data, struct efx_nic, reset_work);
unsigned long pending = ACCESS_ONCE(efx->reset_pending); unsigned long pending;
enum reset_type method;
pending = ACCESS_ONCE(efx->reset_pending);
method = fls(pending) - 1;
if ((method == RESET_TYPE_RECOVER_OR_DISABLE ||
method == RESET_TYPE_RECOVER_OR_ALL) &&
efx_try_recovery(efx))
return;
if (!pending) if (!pending)
return; return;
...@@ -2346,7 +2412,7 @@ static void efx_reset_work(struct work_struct *data) ...@@ -2346,7 +2412,7 @@ static void efx_reset_work(struct work_struct *data)
* it cannot change again. * it cannot change again.
*/ */
if (efx->state == STATE_READY) if (efx->state == STATE_READY)
(void)efx_reset(efx, fls(pending) - 1); (void)efx_reset(efx, method);
rtnl_unlock(); rtnl_unlock();
} }
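	/* Illustrative only, not driver code: reset types are numbered in
	 * order of increasing scope and requests accumulate as bits in
	 * reset_pending, so the fls(pending) - 1 above always selects the
	 * widest-scope reset outstanding.  For example, with the new
	 * numbering (INVISIBLE == 0, WORLD == 3):
	 */
	unsigned long pending = BIT(RESET_TYPE_INVISIBLE) | BIT(RESET_TYPE_WORLD);	/* 0x9 */
	enum reset_type method = fls(pending) - 1;	/* fls(0x9) == 4, so method == RESET_TYPE_WORLD */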
...@@ -2355,11 +2421,20 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) ...@@ -2355,11 +2421,20 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type)
{ {
enum reset_type method; enum reset_type method;
if (efx->state == STATE_RECOVERY) {
netif_dbg(efx, drv, efx->net_dev,
"recovering: skip scheduling %s reset\n",
RESET_TYPE(type));
return;
}
switch (type) { switch (type) {
case RESET_TYPE_INVISIBLE: case RESET_TYPE_INVISIBLE:
case RESET_TYPE_ALL: case RESET_TYPE_ALL:
case RESET_TYPE_RECOVER_OR_ALL:
case RESET_TYPE_WORLD: case RESET_TYPE_WORLD:
case RESET_TYPE_DISABLE: case RESET_TYPE_DISABLE:
case RESET_TYPE_RECOVER_OR_DISABLE:
method = type; method = type;
netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n", netif_dbg(efx, drv, efx->net_dev, "scheduling %s reset\n",
RESET_TYPE(method)); RESET_TYPE(method));
...@@ -2569,6 +2644,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev) ...@@ -2569,6 +2644,8 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
efx_fini_struct(efx); efx_fini_struct(efx);
pci_set_drvdata(pci_dev, NULL); pci_set_drvdata(pci_dev, NULL);
free_netdev(efx->net_dev); free_netdev(efx->net_dev);
pci_disable_pcie_error_reporting(pci_dev);
}; };
/* NIC VPD information /* NIC VPD information
...@@ -2741,6 +2818,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev, ...@@ -2741,6 +2818,11 @@ static int efx_pci_probe(struct pci_dev *pci_dev,
netif_warn(efx, probe, efx->net_dev, netif_warn(efx, probe, efx->net_dev,
"failed to create MTDs (%d)\n", rc); "failed to create MTDs (%d)\n", rc);
rc = pci_enable_pcie_error_reporting(pci_dev);
if (rc && rc != -EINVAL)
netif_warn(efx, probe, efx->net_dev,
"pci_enable_pcie_error_reporting failed (%d)\n", rc);
return 0; return 0;
fail4: fail4:
...@@ -2865,12 +2947,112 @@ static const struct dev_pm_ops efx_pm_ops = { ...@@ -2865,12 +2947,112 @@ static const struct dev_pm_ops efx_pm_ops = {
.restore = efx_pm_resume, .restore = efx_pm_resume,
}; };
/* A PCI error affecting this device was detected.
* At this point MMIO and DMA may be disabled.
* Stop the software path and request a slot reset.
*/
pci_ers_result_t efx_io_error_detected(struct pci_dev *pdev,
enum pci_channel_state state)
{
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
struct efx_nic *efx = pci_get_drvdata(pdev);
if (state == pci_channel_io_perm_failure)
return PCI_ERS_RESULT_DISCONNECT;
rtnl_lock();
if (efx->state != STATE_DISABLED) {
efx->state = STATE_RECOVERY;
efx->reset_pending = 0;
efx_device_detach_sync(efx);
efx_stop_all(efx);
efx_stop_interrupts(efx, false);
status = PCI_ERS_RESULT_NEED_RESET;
} else {
/* If the interface is disabled we don't want to do anything
* with it.
*/
status = PCI_ERS_RESULT_RECOVERED;
}
rtnl_unlock();
pci_disable_device(pdev);
return status;
}
/* Fake a successful reset, which will be performed later in efx_io_resume. */
pci_ers_result_t efx_io_slot_reset(struct pci_dev *pdev)
{
struct efx_nic *efx = pci_get_drvdata(pdev);
pci_ers_result_t status = PCI_ERS_RESULT_RECOVERED;
int rc;
if (pci_enable_device(pdev)) {
netif_err(efx, hw, efx->net_dev,
"Cannot re-enable PCI device after reset.\n");
status = PCI_ERS_RESULT_DISCONNECT;
}
rc = pci_cleanup_aer_uncorrect_error_status(pdev);
if (rc) {
netif_err(efx, hw, efx->net_dev,
"pci_cleanup_aer_uncorrect_error_status failed (%d)\n", rc);
/* Non-fatal error. Continue. */
}
return status;
}
/* Perform the actual reset and resume I/O operations. */
static void efx_io_resume(struct pci_dev *pdev)
{
struct efx_nic *efx = pci_get_drvdata(pdev);
int rc;
rtnl_lock();
if (efx->state == STATE_DISABLED)
goto out;
rc = efx_reset(efx, RESET_TYPE_ALL);
if (rc) {
netif_err(efx, hw, efx->net_dev,
"efx_reset failed after PCI error (%d)\n", rc);
} else {
efx->state = STATE_READY;
netif_dbg(efx, hw, efx->net_dev,
"Done resetting and resuming IO after PCI error.\n");
}
out:
rtnl_unlock();
}
/* For simplicity and reliability, we always require a slot reset and try to
* reset the hardware when a pci error affecting the device is detected.
* We leave both the link_reset and mmio_enabled callbacks unimplemented:
* with our request for slot reset the mmio_enabled callback will never be
* called, and the link_reset callback is not used by AER or EEH mechanisms.
*/
static struct pci_error_handlers efx_err_handlers = {
.error_detected = efx_io_error_detected,
.slot_reset = efx_io_slot_reset,
.resume = efx_io_resume,
};
static struct pci_driver efx_pci_driver = { static struct pci_driver efx_pci_driver = {
.name = KBUILD_MODNAME, .name = KBUILD_MODNAME,
.id_table = efx_pci_table, .id_table = efx_pci_table,
.probe = efx_pci_probe, .probe = efx_pci_probe,
.remove = efx_pci_remove, .remove = efx_pci_remove,
.driver.pm = &efx_pm_ops, .driver.pm = &efx_pm_ops,
.err_handler = &efx_err_handlers,
}; };
/************************************************************************** /**************************************************************************
......
...@@ -33,17 +33,22 @@ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc); ...@@ -33,17 +33,22 @@ extern int efx_setup_tc(struct net_device *net_dev, u8 num_tc);
extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx); extern unsigned int efx_tx_max_skb_descs(struct efx_nic *efx);
/* RX */ /* RX */
extern void efx_rx_config_page_split(struct efx_nic *efx);
extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_remove_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_init_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); extern void efx_fini_rx_queue(struct efx_rx_queue *rx_queue);
extern void efx_rx_strategy(struct efx_channel *channel);
extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); extern void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue);
extern void efx_rx_slow_fill(unsigned long context); extern void efx_rx_slow_fill(unsigned long context);
extern void __efx_rx_packet(struct efx_channel *channel, extern void __efx_rx_packet(struct efx_channel *channel);
struct efx_rx_buffer *rx_buf); extern void efx_rx_packet(struct efx_rx_queue *rx_queue,
extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, unsigned int index, unsigned int n_frags,
unsigned int len, u16 flags); unsigned int len, u16 flags);
static inline void efx_rx_flush_packet(struct efx_channel *channel)
{
if (channel->rx_pkt_n_frags)
__efx_rx_packet(channel);
}
extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
#define EFX_MAX_DMAQ_SIZE 4096UL #define EFX_MAX_DMAQ_SIZE 4096UL
...@@ -67,6 +72,7 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue); ...@@ -67,6 +72,7 @@ extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue);
extern int efx_probe_filters(struct efx_nic *efx); extern int efx_probe_filters(struct efx_nic *efx);
extern void efx_restore_filters(struct efx_nic *efx); extern void efx_restore_filters(struct efx_nic *efx);
extern void efx_remove_filters(struct efx_nic *efx); extern void efx_remove_filters(struct efx_nic *efx);
extern void efx_filter_update_rx_scatter(struct efx_nic *efx);
extern s32 efx_filter_insert_filter(struct efx_nic *efx, extern s32 efx_filter_insert_filter(struct efx_nic *efx,
struct efx_filter_spec *spec, struct efx_filter_spec *spec,
bool replace); bool replace);
...@@ -171,9 +177,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx) ...@@ -171,9 +177,9 @@ static inline void efx_device_detach_sync(struct efx_nic *efx)
* TX scheduler is stopped when we're done and before * TX scheduler is stopped when we're done and before
* netif_device_present() becomes false. * netif_device_present() becomes false.
*/ */
netif_tx_lock(dev); netif_tx_lock_bh(dev);
netif_device_detach(dev); netif_device_detach(dev);
netif_tx_unlock(dev); netif_tx_unlock_bh(dev);
} }
#endif /* EFX_EFX_H */ #endif /* EFX_EFX_H */
...@@ -137,8 +137,12 @@ enum efx_loopback_mode { ...@@ -137,8 +137,12 @@ enum efx_loopback_mode {
* Reset methods are numbered in order of increasing scope. * Reset methods are numbered in order of increasing scope.
* *
* @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only) * @RESET_TYPE_INVISIBLE: Reset datapath and MAC (Falcon only)
* @RESET_TYPE_RECOVER_OR_ALL: Try to recover. Apply RESET_TYPE_ALL
* if unsuccessful.
* @RESET_TYPE_ALL: Reset datapath, MAC and PHY * @RESET_TYPE_ALL: Reset datapath, MAC and PHY
* @RESET_TYPE_WORLD: Reset as much as possible * @RESET_TYPE_WORLD: Reset as much as possible
* @RESET_TYPE_RECOVER_OR_DISABLE: Try to recover. Apply RESET_TYPE_DISABLE if
* unsuccessful.
* @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled * @RESET_TYPE_DISABLE: Reset datapath, MAC and PHY; leave NIC disabled
* @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog * @RESET_TYPE_TX_WATCHDOG: reset due to TX watchdog
* @RESET_TYPE_INT_ERROR: reset due to internal error * @RESET_TYPE_INT_ERROR: reset due to internal error
...@@ -150,9 +154,11 @@ enum efx_loopback_mode { ...@@ -150,9 +154,11 @@ enum efx_loopback_mode {
*/ */
enum reset_type { enum reset_type {
RESET_TYPE_INVISIBLE = 0, RESET_TYPE_INVISIBLE = 0,
RESET_TYPE_ALL = 1, RESET_TYPE_RECOVER_OR_ALL = 1,
RESET_TYPE_WORLD = 2, RESET_TYPE_ALL = 2,
RESET_TYPE_DISABLE = 3, RESET_TYPE_WORLD = 3,
RESET_TYPE_RECOVER_OR_DISABLE = 4,
RESET_TYPE_DISABLE = 5,
RESET_TYPE_MAX_METHOD, RESET_TYPE_MAX_METHOD,
RESET_TYPE_TX_WATCHDOG, RESET_TYPE_TX_WATCHDOG,
RESET_TYPE_INT_ERROR, RESET_TYPE_INT_ERROR,
......
...@@ -154,6 +154,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = { ...@@ -154,6 +154,7 @@ static const struct efx_ethtool_stat efx_ethtool_stats[] = {
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
}; };
/* Number of ethtool statistics */ /* Number of ethtool statistics */
...@@ -978,7 +979,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx, ...@@ -978,7 +979,8 @@ static int efx_ethtool_set_class_rule(struct efx_nic *efx,
rule->m_ext.data[1])) rule->m_ext.data[1]))
return -EINVAL; return -EINVAL;
efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL, 0, efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL,
efx->rx_scatter ? EFX_FILTER_FLAG_RX_SCATTER : 0,
(rule->ring_cookie == RX_CLS_FLOW_DISC) ? (rule->ring_cookie == RX_CLS_FLOW_DISC) ?
0xfff : rule->ring_cookie); 0xfff : rule->ring_cookie);
......
...@@ -1546,10 +1546,6 @@ static int falcon_probe_nic(struct efx_nic *efx) ...@@ -1546,10 +1546,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
static void falcon_init_rx_cfg(struct efx_nic *efx) static void falcon_init_rx_cfg(struct efx_nic *efx)
{ {
/* Prior to Siena the RX DMA engine will split each frame at
* intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
* be so large that that never happens. */
const unsigned huge_buf_size = (3 * 4096) >> 5;
/* RX control FIFO thresholds (32 entries) */ /* RX control FIFO thresholds (32 entries) */
const unsigned ctrl_xon_thr = 20; const unsigned ctrl_xon_thr = 20;
const unsigned ctrl_xoff_thr = 25; const unsigned ctrl_xoff_thr = 25;
...@@ -1557,10 +1553,15 @@ static void falcon_init_rx_cfg(struct efx_nic *efx) ...@@ -1557,10 +1553,15 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
efx_reado(efx, &reg, FR_AZ_RX_CFG); efx_reado(efx, &reg, FR_AZ_RX_CFG);
if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) {
/* Data FIFO size is 5.5K */ /* Data FIFO size is 5.5K. The RX DMA engine only
* supports scattering for user-mode queues, but will
* split DMA writes at intervals of RX_USR_BUF_SIZE
* (32-byte units) even for kernel-mode queues. We
* set it to be so large that that never happens.
*/
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
huge_buf_size); (3 * 4096) >> 5);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
...@@ -1569,7 +1570,7 @@ static void falcon_init_rx_cfg(struct efx_nic *efx) ...@@ -1569,7 +1570,7 @@ static void falcon_init_rx_cfg(struct efx_nic *efx)
/* Data FIFO size is 80K; register fields moved */ /* Data FIFO size is 80K; register fields moved */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
huge_buf_size); EFX_RX_USR_BUF_SIZE >> 5);
/* Send XON and XOFF at ~3 * max MTU away from empty/full */ /* Send XON and XOFF at ~3 * max MTU away from empty/full */
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8);
EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8);
...@@ -1815,6 +1816,7 @@ const struct efx_nic_type falcon_a1_nic_type = { ...@@ -1815,6 +1816,7 @@ const struct efx_nic_type falcon_a1_nic_type = {
.evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_padding = 0x24, .rx_buffer_padding = 0x24,
.can_rx_scatter = false,
.max_interrupt_mode = EFX_INT_MODE_MSI, .max_interrupt_mode = EFX_INT_MODE_MSI,
.phys_addr_channels = 4, .phys_addr_channels = 4,
.timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH,
...@@ -1865,6 +1867,7 @@ const struct efx_nic_type falcon_b0_nic_type = { ...@@ -1865,6 +1867,7 @@ const struct efx_nic_type falcon_b0_nic_type = {
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10, .rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0, .rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX, .max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32 * interrupt handler only supports 32
......
...@@ -66,6 +66,10 @@ struct efx_filter_state { ...@@ -66,6 +66,10 @@ struct efx_filter_state {
#endif #endif
}; };
static void efx_filter_table_clear_entry(struct efx_nic *efx,
struct efx_filter_table *table,
unsigned int filter_idx);
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit /* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
* key derived from the n-tuple. The initial LFSR state is 0xffff. */ * key derived from the n-tuple. The initial LFSR state is 0xffff. */
static u16 efx_filter_hash(u32 key) static u16 efx_filter_hash(u32 key)
...@@ -168,6 +172,25 @@ static void efx_filter_push_rx_config(struct efx_nic *efx) ...@@ -168,6 +172,25 @@ static void efx_filter_push_rx_config(struct efx_nic *efx)
filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED, filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
!!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags & !!(table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_RSS)); EFX_FILTER_FLAG_RX_RSS));
/* There is a single bit to enable RX scatter for all
* unmatched packets. Only set it if scatter is
* enabled in both filter specs.
*/
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
!!(table->spec[EFX_FILTER_INDEX_UC_DEF].flags &
table->spec[EFX_FILTER_INDEX_MC_DEF].flags &
EFX_FILTER_FLAG_RX_SCATTER));
} else if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
/* We don't expose 'default' filters because unmatched
* packets always go to the queue number found in the
* RSS table. But we still need to set the RX scatter
* bit here.
*/
EFX_SET_OWORD_FIELD(
filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
efx->rx_scatter);
} }
efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
...@@ -409,9 +432,18 @@ static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx) ...@@ -409,9 +432,18 @@ static void efx_filter_reset_rx_def(struct efx_nic *efx, unsigned filter_idx)
struct efx_filter_state *state = efx->filter_state; struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF]; struct efx_filter_table *table = &state->table[EFX_FILTER_TABLE_RX_DEF];
struct efx_filter_spec *spec = &table->spec[filter_idx]; struct efx_filter_spec *spec = &table->spec[filter_idx];
enum efx_filter_flags flags = 0;
/* If there's only one channel then disable RSS for non VF
* traffic, thereby allowing VFs to use RSS when the PF can't.
*/
if (efx->n_rx_channels > 1)
flags |= EFX_FILTER_FLAG_RX_RSS;
efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, if (efx->rx_scatter)
EFX_FILTER_FLAG_RX_RSS, 0); flags |= EFX_FILTER_FLAG_RX_SCATTER;
efx_filter_init_rx(spec, EFX_FILTER_PRI_MANUAL, flags, 0);
spec->type = EFX_FILTER_UC_DEF + filter_idx; spec->type = EFX_FILTER_UC_DEF + filter_idx;
table->used_bitmap[0] |= 1 << filter_idx; table->used_bitmap[0] |= 1 << filter_idx;
} }
...@@ -463,13 +495,6 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) ...@@ -463,13 +495,6 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
break; break;
} }
case EFX_FILTER_TABLE_RX_DEF:
/* One filter spec per type */
BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
return spec->type - EFX_FILTER_UC_DEF;
case EFX_FILTER_TABLE_RX_MAC: { case EFX_FILTER_TABLE_RX_MAC: {
bool is_wild = spec->type == EFX_FILTER_MAC_WILD; bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
EFX_POPULATE_OWORD_7( EFX_POPULATE_OWORD_7(
...@@ -520,42 +545,6 @@ static bool efx_filter_equal(const struct efx_filter_spec *left, ...@@ -520,42 +545,6 @@ static bool efx_filter_equal(const struct efx_filter_spec *left,
return true; return true;
} }
static int efx_filter_search(struct efx_filter_table *table,
struct efx_filter_spec *spec, u32 key,
bool for_insert, unsigned int *depth_required)
{
unsigned hash, incr, filter_idx, depth, depth_max;
hash = efx_filter_hash(key);
incr = efx_filter_increment(key);
filter_idx = hash & (table->size - 1);
depth = 1;
depth_max = (for_insert ?
(spec->priority <= EFX_FILTER_PRI_HINT ?
FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX) :
table->search_depth[spec->type]);
for (;;) {
/* Return success if entry is used and matches this spec
* or entry is unused and we are trying to insert.
*/
if (test_bit(filter_idx, table->used_bitmap) ?
efx_filter_equal(spec, &table->spec[filter_idx]) :
for_insert) {
*depth_required = depth;
return filter_idx;
}
/* Return failure if we reached the maximum search depth */
if (depth == depth_max)
return for_insert ? -EBUSY : -ENOENT;
filter_idx = (filter_idx + incr) & (table->size - 1);
++depth;
}
}
/* /*
* Construct/deconstruct external filter IDs. At least the RX filter * Construct/deconstruct external filter IDs. At least the RX filter
* IDs must be ordered by matching priority, for RX NFC semantics. * IDs must be ordered by matching priority, for RX NFC semantics.
...@@ -650,44 +639,111 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx) ...@@ -650,44 +639,111 @@ u32 efx_filter_get_rx_id_limit(struct efx_nic *efx)
* efx_filter_insert_filter - add or replace a filter * efx_filter_insert_filter - add or replace a filter
* @efx: NIC in which to insert the filter * @efx: NIC in which to insert the filter
* @spec: Specification for the filter * @spec: Specification for the filter
* @replace: Flag for whether the specified filter may replace a filter * @replace_equal: Flag for whether the specified filter may replace an
* with an identical match expression and equal or lower priority * existing filter with equal priority
* *
* On success, return the filter ID. * On success, return the filter ID.
* On failure, return a negative error code. * On failure, return a negative error code.
*
* If an existing filter has equal match values to the new filter
* spec, then the new filter might replace it, depending on the
* relative priorities. If the existing filter has lower priority, or
* if @replace_equal is set and it has equal priority, then it is
* replaced. Otherwise the function fails, returning -%EPERM if
* the existing filter has higher priority or -%EEXIST if it has
* equal priority.
*/ */
s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
bool replace) bool replace_equal)
{ {
struct efx_filter_state *state = efx->filter_state; struct efx_filter_state *state = efx->filter_state;
struct efx_filter_table *table = efx_filter_spec_table(state, spec); struct efx_filter_table *table = efx_filter_spec_table(state, spec);
struct efx_filter_spec *saved_spec;
efx_oword_t filter; efx_oword_t filter;
unsigned int filter_idx, depth = 0; int rep_index, ins_index;
u32 key; unsigned int depth = 0;
int rc; int rc;
if (!table || table->size == 0) if (!table || table->size == 0)
return -EINVAL; return -EINVAL;
key = efx_filter_build(&filter, spec);
netif_vdbg(efx, hw, efx->net_dev, netif_vdbg(efx, hw, efx->net_dev,
"%s: type %d search_depth=%d", __func__, spec->type, "%s: type %d search_depth=%d", __func__, spec->type,
table->search_depth[spec->type]); table->search_depth[spec->type]);
spin_lock_bh(&state->lock); if (table->id == EFX_FILTER_TABLE_RX_DEF) {
/* One filter spec per type */
BUILD_BUG_ON(EFX_FILTER_INDEX_UC_DEF != 0);
BUILD_BUG_ON(EFX_FILTER_INDEX_MC_DEF !=
EFX_FILTER_MC_DEF - EFX_FILTER_UC_DEF);
rep_index = spec->type - EFX_FILTER_INDEX_UC_DEF;
ins_index = rep_index;
rc = efx_filter_search(table, spec, key, true, &depth); spin_lock_bh(&state->lock);
if (rc < 0) } else {
goto out; /* Search concurrently for
filter_idx = rc; * (1) a filter to be replaced (rep_index): any filter
BUG_ON(filter_idx >= table->size); * with the same match values, up to the current
saved_spec = &table->spec[filter_idx]; * search depth for this type, and
* (2) the insertion point (ins_index): (1) or any
if (test_bit(filter_idx, table->used_bitmap)) { * free slot before it or up to the maximum search
/* Should we replace the existing filter? */ * depth for this priority
if (!replace) { * We fail if we cannot find (2).
*
* We can stop once either
* (a) we find (1), in which case we have definitely
* found (2) as well; or
* (b) we have searched exhaustively for (1), and have
* either found (2) or searched exhaustively for it
*/
u32 key = efx_filter_build(&filter, spec);
unsigned int hash = efx_filter_hash(key);
unsigned int incr = efx_filter_increment(key);
unsigned int max_rep_depth = table->search_depth[spec->type];
unsigned int max_ins_depth =
spec->priority <= EFX_FILTER_PRI_HINT ?
FILTER_CTL_SRCH_HINT_MAX : FILTER_CTL_SRCH_MAX;
unsigned int i = hash & (table->size - 1);
ins_index = -1;
depth = 1;
spin_lock_bh(&state->lock);
for (;;) {
if (!test_bit(i, table->used_bitmap)) {
if (ins_index < 0)
ins_index = i;
} else if (efx_filter_equal(spec, &table->spec[i])) {
/* Case (a) */
if (ins_index < 0)
ins_index = i;
rep_index = i;
break;
}
if (depth >= max_rep_depth &&
(ins_index >= 0 || depth >= max_ins_depth)) {
/* Case (b) */
if (ins_index < 0) {
rc = -EBUSY;
goto out;
}
rep_index = -1;
break;
}
i = (i + incr) & (table->size - 1);
++depth;
}
}
/* If we found a filter to be replaced, check whether we
* should do so
*/
if (rep_index >= 0) {
struct efx_filter_spec *saved_spec = &table->spec[rep_index];
if (spec->priority == saved_spec->priority && !replace_equal) {
rc = -EEXIST; rc = -EEXIST;
goto out; goto out;
} }
...@@ -695,11 +751,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, ...@@ -695,11 +751,14 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
rc = -EPERM; rc = -EPERM;
goto out; goto out;
} }
} else { }
__set_bit(filter_idx, table->used_bitmap);
/* Insert the filter */
if (ins_index != rep_index) {
__set_bit(ins_index, table->used_bitmap);
++table->used; ++table->used;
} }
*saved_spec = *spec; table->spec[ins_index] = *spec;
if (table->id == EFX_FILTER_TABLE_RX_DEF) { if (table->id == EFX_FILTER_TABLE_RX_DEF) {
efx_filter_push_rx_config(efx); efx_filter_push_rx_config(efx);
...@@ -713,13 +772,19 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, ...@@ -713,13 +772,19 @@ s32 efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
} }
efx_writeo(efx, &filter, efx_writeo(efx, &filter,
table->offset + table->step * filter_idx); table->offset + table->step * ins_index);
/* If we were able to replace a filter by inserting
* at a lower depth, clear the replaced filter
*/
if (ins_index != rep_index && rep_index >= 0)
efx_filter_table_clear_entry(efx, table, rep_index);
} }
netif_vdbg(efx, hw, efx->net_dev, netif_vdbg(efx, hw, efx->net_dev,
"%s: filter type %d index %d rxq %u set", "%s: filter type %d index %d rxq %u set",
__func__, spec->type, filter_idx, spec->dmaq_id); __func__, spec->type, ins_index, spec->dmaq_id);
rc = efx_filter_make_id(spec, filter_idx); rc = efx_filter_make_id(spec, ins_index);
out: out:
spin_unlock_bh(&state->lock); spin_unlock_bh(&state->lock);
...@@ -1060,6 +1125,50 @@ void efx_remove_filters(struct efx_nic *efx) ...@@ -1060,6 +1125,50 @@ void efx_remove_filters(struct efx_nic *efx)
kfree(state); kfree(state);
} }
/* Update scatter enable flags for filters pointing to our own RX queues */
void efx_filter_update_rx_scatter(struct efx_nic *efx)
{
struct efx_filter_state *state = efx->filter_state;
enum efx_filter_table_id table_id;
struct efx_filter_table *table;
efx_oword_t filter;
unsigned int filter_idx;
spin_lock_bh(&state->lock);
for (table_id = EFX_FILTER_TABLE_RX_IP;
table_id <= EFX_FILTER_TABLE_RX_DEF;
table_id++) {
table = &state->table[table_id];
for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
if (!test_bit(filter_idx, table->used_bitmap) ||
table->spec[filter_idx].dmaq_id >=
efx->n_rx_channels)
continue;
if (efx->rx_scatter)
table->spec[filter_idx].flags |=
EFX_FILTER_FLAG_RX_SCATTER;
else
table->spec[filter_idx].flags &=
~EFX_FILTER_FLAG_RX_SCATTER;
if (table_id == EFX_FILTER_TABLE_RX_DEF)
/* Pushed by efx_filter_push_rx_config() */
continue;
efx_filter_build(&filter, &table->spec[filter_idx]);
efx_writeo(efx, &filter,
table->offset + table->step * filter_idx);
}
}
efx_filter_push_rx_config(efx);
spin_unlock_bh(&state->lock);
}
#ifdef CONFIG_RFS_ACCEL #ifdef CONFIG_RFS_ACCEL
int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
......
...@@ -553,6 +553,7 @@ ...@@ -553,6 +553,7 @@
#define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */ #define MC_CMD_PTP_MODE_V1_VLAN 0x1 /* enum */
#define MC_CMD_PTP_MODE_V2 0x2 /* enum */ #define MC_CMD_PTP_MODE_V2 0x2 /* enum */
#define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */ #define MC_CMD_PTP_MODE_V2_VLAN 0x3 /* enum */
#define MC_CMD_PTP_MODE_V2_ENHANCED 0x4 /* enum */
/* MC_CMD_PTP_IN_DISABLE msgrequest */ /* MC_CMD_PTP_IN_DISABLE msgrequest */
#define MC_CMD_PTP_IN_DISABLE_LEN 8 #define MC_CMD_PTP_IN_DISABLE_LEN 8
......
...@@ -69,6 +69,12 @@ ...@@ -69,6 +69,12 @@
#define EFX_TXQ_TYPES 4 #define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS) #define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)
/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page. */
#define EFX_RX_USR_BUF_SIZE 1824
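/* Illustrative arithmetic only (assuming 4 KiB pages): 2 * 1824 = 3648 bytes,
 * which leaves 4096 - 3648 = 448 bytes per page for the struct
 * efx_rx_page_state kept at the start of the page, the EFX_PAGE_IP_ALIGN
 * offset and any per-buffer alignment.  The BUILD_BUG_ON() added in
 * efx_start_datapath() enforces a stronger form of this at compile time:
 * one buffer plus that overhead must fit in half a page.
 */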
/* Forward declare Precision Time Protocol (PTP) support structure. */ /* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data; struct efx_ptp_data;
...@@ -206,25 +212,23 @@ struct efx_tx_queue { ...@@ -206,25 +212,23 @@ struct efx_tx_queue {
/** /**
* struct efx_rx_buffer - An Efx RX data buffer * struct efx_rx_buffer - An Efx RX data buffer
* @dma_addr: DMA base address of the buffer * @dma_addr: DMA base address of the buffer
* @skb: The associated socket buffer. Valid iff !(@flags & %EFX_RX_BUF_PAGE). * @page: The associated page buffer.
* Will be %NULL if the buffer slot is currently free.
* @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
* Will be %NULL if the buffer slot is currently free. * Will be %NULL if the buffer slot is currently free.
* @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE. * @page_offset: If pending: offset in @page of DMA base address.
* @len: Buffer length, in bytes. * If completed: offset in @page of Ethernet header.
* @flags: Flags for buffer and packet state. * @len: If pending: length for DMA descriptor.
* If completed: received length, excluding hash prefix.
* @flags: Flags for buffer and packet state. These are only set on the
* first buffer of a scattered packet.
*/ */
struct efx_rx_buffer { struct efx_rx_buffer {
dma_addr_t dma_addr; dma_addr_t dma_addr;
union { struct page *page;
struct sk_buff *skb;
struct page *page;
} u;
u16 page_offset; u16 page_offset;
u16 len; u16 len;
u16 flags; u16 flags;
}; };
#define EFX_RX_BUF_PAGE 0x0001 #define EFX_RX_BUF_LAST_IN_PAGE 0x0001
#define EFX_RX_PKT_CSUMMED 0x0002 #define EFX_RX_PKT_CSUMMED 0x0002
#define EFX_RX_PKT_DISCARD 0x0004 #define EFX_RX_PKT_DISCARD 0x0004
...@@ -260,14 +264,23 @@ struct efx_rx_page_state { ...@@ -260,14 +264,23 @@ struct efx_rx_page_state {
* @added_count: Number of buffers added to the receive queue. * @added_count: Number of buffers added to the receive queue.
* @notified_count: Number of buffers given to NIC (<= @added_count). * @notified_count: Number of buffers given to NIC (<= @added_count).
* @removed_count: Number of buffers removed from the receive queue. * @removed_count: Number of buffers removed from the receive queue.
* @scatter_n: Number of buffers used by current packet
* @page_ring: The ring to store DMA mapped pages for reuse.
* @page_add: Counter to calculate the write pointer for the recycle ring.
* @page_remove: Counter to calculate the read pointer for the recycle ring.
* @page_recycle_count: The number of pages that have been recycled.
* @page_recycle_failed: The number of pages that couldn't be recycled because
* the kernel still held a reference to them.
* @page_recycle_full: The number of pages that were released because the
* recycle ring was full.
* @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
* @max_fill: RX descriptor maximum fill level (<= ring size) * @max_fill: RX descriptor maximum fill level (<= ring size)
* @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
* (<= @max_fill) * (<= @max_fill)
* @min_fill: RX descriptor minimum non-zero fill level. * @min_fill: RX descriptor minimum non-zero fill level.
* This records the minimum fill level observed when a ring * This records the minimum fill level observed when a ring
* refill was triggered. * refill was triggered.
* @alloc_page_count: RX allocation strategy counter. * @recycle_count: RX buffer recycle counter.
* @alloc_skb_count: RX allocation strategy counter.
* @slow_fill: Timer used to defer efx_nic_generate_fill_event(). * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
*/ */
struct efx_rx_queue { struct efx_rx_queue {
...@@ -279,15 +292,22 @@ struct efx_rx_queue { ...@@ -279,15 +292,22 @@ struct efx_rx_queue {
bool enabled; bool enabled;
bool flush_pending; bool flush_pending;
int added_count; unsigned int added_count;
int notified_count; unsigned int notified_count;
int removed_count; unsigned int removed_count;
unsigned int scatter_n;
struct page **page_ring;
unsigned int page_add;
unsigned int page_remove;
unsigned int page_recycle_count;
unsigned int page_recycle_failed;
unsigned int page_recycle_full;
unsigned int page_ptr_mask;
unsigned int max_fill; unsigned int max_fill;
unsigned int fast_fill_trigger; unsigned int fast_fill_trigger;
unsigned int min_fill; unsigned int min_fill;
unsigned int min_overfill; unsigned int min_overfill;
unsigned int alloc_page_count; unsigned int recycle_count;
unsigned int alloc_skb_count;
struct timer_list slow_fill; struct timer_list slow_fill;
unsigned int slow_fill_count; unsigned int slow_fill_count;
}; };
...@@ -336,10 +356,6 @@ enum efx_rx_alloc_method { ...@@ -336,10 +356,6 @@ enum efx_rx_alloc_method {
* @event_test_cpu: Last CPU to handle interrupt or test event for this channel * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
* @irq_count: Number of IRQs since last adaptive moderation decision * @irq_count: Number of IRQs since last adaptive moderation decision
* @irq_mod_score: IRQ moderation score * @irq_mod_score: IRQ moderation score
* @rx_alloc_level: Watermark based heuristic counter for pushing descriptors
* and diagnostic counters
* @rx_alloc_push_pages: RX allocation method currently in use for pushing
* descriptors
* @n_rx_tobe_disc: Count of RX_TOBE_DISC errors * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
* @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
* @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
...@@ -347,6 +363,12 @@ enum efx_rx_alloc_method { ...@@ -347,6 +363,12 @@ enum efx_rx_alloc_method {
* @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
* @n_rx_overlength: Count of RX_OVERLENGTH errors * @n_rx_overlength: Count of RX_OVERLENGTH errors
* @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
* @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
* lack of descriptors
* @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
* __efx_rx_packet(), or zero if there is none
* @rx_pkt_index: Ring index of first buffer for next packet to be delivered
* by __efx_rx_packet(), if @rx_pkt_n_frags != 0
* @rx_queue: RX queue for this channel * @rx_queue: RX queue for this channel
* @tx_queue: TX queues for this channel * @tx_queue: TX queues for this channel
*/ */
...@@ -371,9 +393,6 @@ struct efx_channel { ...@@ -371,9 +393,6 @@ struct efx_channel {
unsigned int rfs_filters_added; unsigned int rfs_filters_added;
#endif #endif
int rx_alloc_level;
int rx_alloc_push_pages;
unsigned n_rx_tobe_disc; unsigned n_rx_tobe_disc;
unsigned n_rx_ip_hdr_chksum_err; unsigned n_rx_ip_hdr_chksum_err;
unsigned n_rx_tcp_udp_chksum_err; unsigned n_rx_tcp_udp_chksum_err;
...@@ -381,11 +400,10 @@ struct efx_channel { ...@@ -381,11 +400,10 @@ struct efx_channel {
unsigned n_rx_frm_trunc; unsigned n_rx_frm_trunc;
unsigned n_rx_overlength; unsigned n_rx_overlength;
unsigned n_skbuff_leaks; unsigned n_skbuff_leaks;
unsigned int n_rx_nodesc_trunc;
/* Used to pipeline received packets in order to optimise memory unsigned int rx_pkt_n_frags;
* access with prefetches. unsigned int rx_pkt_index;
*/
struct efx_rx_buffer *rx_pkt;
struct efx_rx_queue rx_queue; struct efx_rx_queue rx_queue;
struct efx_tx_queue tx_queue[EFX_TXQ_TYPES]; struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];
...@@ -410,7 +428,7 @@ struct efx_channel_type { ...@@ -410,7 +428,7 @@ struct efx_channel_type {
void (*post_remove)(struct efx_channel *); void (*post_remove)(struct efx_channel *);
void (*get_name)(struct efx_channel *, char *buf, size_t len); void (*get_name)(struct efx_channel *, char *buf, size_t len);
struct efx_channel *(*copy)(const struct efx_channel *); struct efx_channel *(*copy)(const struct efx_channel *);
void (*receive_skb)(struct efx_channel *, struct sk_buff *); bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
bool keep_eventq; bool keep_eventq;
}; };
...@@ -446,6 +464,7 @@ enum nic_state { ...@@ -446,6 +464,7 @@ enum nic_state {
STATE_UNINIT = 0, /* device being probed/removed or is frozen */ STATE_UNINIT = 0, /* device being probed/removed or is frozen */
STATE_READY = 1, /* hardware ready and netdev registered */ STATE_READY = 1, /* hardware ready and netdev registered */
STATE_DISABLED = 2, /* device disabled due to hardware errors */ STATE_DISABLED = 2, /* device disabled due to hardware errors */
STATE_RECOVERY = 3, /* device recovering from PCI error */
}; };
/* /*
...@@ -684,10 +703,13 @@ struct vfdi_status; ...@@ -684,10 +703,13 @@ struct vfdi_status;
* @n_channels: Number of channels in use * @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues) * @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX * @n_tx_channels: Number of channels used for TX
* @rx_buffer_len: RX buffer length * @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
* for use in sk_buff::truesize
* @rx_hash_key: Toeplitz hash key for RSS * @rx_hash_key: Toeplitz hash key for RSS
* @rx_indir_table: Indirection table for RSS * @rx_indir_table: Indirection table for RSS
* @rx_scatter: Scatter mode enabled for receives
* @int_error_count: Number of internal errors seen recently * @int_error_count: Number of internal errors seen recently
* @int_error_expire: Time at which error count will be expired * @int_error_expire: Time at which error count will be expired
* @irq_status: Interrupt status buffer * @irq_status: Interrupt status buffer
...@@ -800,10 +822,15 @@ struct efx_nic { ...@@ -800,10 +822,15 @@ struct efx_nic {
unsigned rss_spread; unsigned rss_spread;
unsigned tx_channel_offset; unsigned tx_channel_offset;
unsigned n_tx_channels; unsigned n_tx_channels;
unsigned int rx_buffer_len; unsigned int rx_dma_len;
unsigned int rx_buffer_order; unsigned int rx_buffer_order;
unsigned int rx_buffer_truesize;
unsigned int rx_page_buf_step;
unsigned int rx_bufs_per_page;
unsigned int rx_pages_per_batch;
u8 rx_hash_key[40]; u8 rx_hash_key[40];
u32 rx_indir_table[128]; u32 rx_indir_table[128];
bool rx_scatter;
unsigned int_error_count; unsigned int_error_count;
unsigned long int_error_expire; unsigned long int_error_expire;
...@@ -934,8 +961,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx) ...@@ -934,8 +961,9 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
* @evq_ptr_tbl_base: Event queue pointer table base address * @evq_ptr_tbl_base: Event queue pointer table base address
* @evq_rptr_tbl_base: Event queue read-pointer table base address * @evq_rptr_tbl_base: Event queue read-pointer table base address
* @max_dma_mask: Maximum possible DMA mask * @max_dma_mask: Maximum possible DMA mask
* @rx_buffer_hash_size: Size of hash at start of RX buffer * @rx_buffer_hash_size: Size of hash at start of RX packet
* @rx_buffer_padding: Size of padding at end of RX buffer * @rx_buffer_padding: Size of padding at end of RX packet
* @can_rx_scatter: NIC is able to scatter packet to multiple buffers
* @max_interrupt_mode: Highest capability interrupt mode supported * @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode. * from &enum efx_init_mode.
* @phys_addr_channels: Number of channels with physically addressed * @phys_addr_channels: Number of channels with physically addressed
...@@ -983,6 +1011,7 @@ struct efx_nic_type { ...@@ -983,6 +1011,7 @@ struct efx_nic_type {
u64 max_dma_mask; u64 max_dma_mask;
unsigned int rx_buffer_hash_size; unsigned int rx_buffer_hash_size;
unsigned int rx_buffer_padding; unsigned int rx_buffer_padding;
bool can_rx_scatter;
unsigned int max_interrupt_mode; unsigned int max_interrupt_mode;
unsigned int phys_addr_channels; unsigned int phys_addr_channels;
unsigned int timer_period_max; unsigned int timer_period_max;
......
...@@ -591,12 +591,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue) ...@@ -591,12 +591,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
struct efx_nic *efx = rx_queue->efx; struct efx_nic *efx = rx_queue->efx;
bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0; bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
bool iscsi_digest_en = is_b0; bool iscsi_digest_en = is_b0;
bool jumbo_en;
/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
* DMA to continue after a PCIe page boundary (and scattering
* is not possible). In Falcon B0 and Siena, it enables
* scatter.
*/
jumbo_en = !is_b0 || efx->rx_scatter;
netif_dbg(efx, hw, efx->net_dev, netif_dbg(efx, hw, efx->net_dev,
"RX queue %d ring in special buffers %d-%d\n", "RX queue %d ring in special buffers %d-%d\n",
efx_rx_queue_index(rx_queue), rx_queue->rxd.index, efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
rx_queue->rxd.index + rx_queue->rxd.entries - 1); rx_queue->rxd.index + rx_queue->rxd.entries - 1);
rx_queue->scatter_n = 0;
/* Pin RX descriptor ring */ /* Pin RX descriptor ring */
efx_init_special_buffer(efx, &rx_queue->rxd); efx_init_special_buffer(efx, &rx_queue->rxd);
...@@ -613,8 +623,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue) ...@@ -613,8 +623,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
FRF_AZ_RX_DESCQ_SIZE, FRF_AZ_RX_DESCQ_SIZE,
__ffs(rx_queue->rxd.entries), __ffs(rx_queue->rxd.entries),
FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ , FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
/* For >=B0 this is scatter so disable */ FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
FRF_AZ_RX_DESCQ_EN, 1); FRF_AZ_RX_DESCQ_EN, 1);
efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
efx_rx_queue_index(rx_queue)); efx_rx_queue_index(rx_queue));
...@@ -968,13 +977,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue, ...@@ -968,13 +977,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
EFX_RX_PKT_DISCARD : 0; EFX_RX_PKT_DISCARD : 0;
} }
/* Handle receive events that are not in-order. */ /* Handle receive events that are not in-order. Return true if this
static void * can be handled as a partial packet discard, false if it's more
* serious.
*/
static bool
efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
{ {
struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
struct efx_nic *efx = rx_queue->efx; struct efx_nic *efx = rx_queue->efx;
unsigned expected, dropped; unsigned expected, dropped;
if (rx_queue->scatter_n &&
index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
rx_queue->ptr_mask)) {
++channel->n_rx_nodesc_trunc;
return true;
}
expected = rx_queue->removed_count & rx_queue->ptr_mask; expected = rx_queue->removed_count & rx_queue->ptr_mask;
dropped = (index - expected) & rx_queue->ptr_mask; dropped = (index - expected) & rx_queue->ptr_mask;
netif_info(efx, rx_err, efx->net_dev, netif_info(efx, rx_err, efx->net_dev,
...@@ -983,6 +1003,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index) ...@@ -983,6 +1003,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
return false;
} }
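For readers following the new out-of-order handling, here is a simplified user-space sketch of the test that classifies a bad descriptor index as a truncated scattered packet rather than a serious error. The struct and function names are hypothetical stand-ins; only the masked ring arithmetic mirrors the code above.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified view of the RX queue state used by the check. */
struct rx_ring_state {
	unsigned int removed_count; /* descriptors already consumed */
	unsigned int scatter_n;     /* fragments of the packet in progress */
	unsigned int ptr_mask;      /* ring size - 1 (ring size is a power of 2) */
};

/* Return true if 'index' points at the last fragment already posted for the
 * in-progress scattered packet, i.e. the hardware truncated the packet rather
 * than delivered descriptors out of order.
 */
static bool is_partial_discard(const struct rx_ring_state *q, unsigned int index)
{
	return q->scatter_n &&
	       index == ((q->removed_count + q->scatter_n - 1) & q->ptr_mask);
}

int main(void)
{
	struct rx_ring_state q = { .removed_count = 1020, .scatter_n = 3, .ptr_mask = 1023 };

	/* (1020 + 3 - 1) & 1023 = 1022: repeat of the last posted fragment -> truncation. */
	printf("%d\n", is_partial_discard(&q, 1022)); /* 1 */
	/* Any other index is treated as a serious out-of-order condition. */
	printf("%d\n", is_partial_discard(&q, 5));    /* 0 */
	return 0;
}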
/* Handle a packet received event /* Handle a packet received event
...@@ -998,7 +1019,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) ...@@ -998,7 +1019,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt; unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt; unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
unsigned expected_ptr; unsigned expected_ptr;
bool rx_ev_pkt_ok; bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
u16 flags; u16 flags;
struct efx_rx_queue *rx_queue; struct efx_rx_queue *rx_queue;
struct efx_nic *efx = channel->efx; struct efx_nic *efx = channel->efx;
...@@ -1006,21 +1027,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) ...@@ -1006,21 +1027,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
if (unlikely(ACCESS_ONCE(efx->reset_pending))) if (unlikely(ACCESS_ONCE(efx->reset_pending)))
return; return;
/* Basic packet information */ rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT); rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) != WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
channel->channel); channel->channel);
rx_queue = efx_channel_get_rx_queue(channel); rx_queue = efx_channel_get_rx_queue(channel);
rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR); rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask; expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
if (unlikely(rx_ev_desc_ptr != expected_ptr)) rx_queue->ptr_mask);
efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
/* Check for partial drops and other errors */
if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
if (rx_ev_desc_ptr != expected_ptr &&
!efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
return;
/* Discard all pending fragments */
if (rx_queue->scatter_n) {
efx_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0;
}
/* Return if there is no new fragment */
if (rx_ev_desc_ptr != expected_ptr)
return;
/* Discard new fragment if not SOP */
if (!rx_ev_sop) {
efx_rx_packet(
rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
1, 0, EFX_RX_PKT_DISCARD);
++rx_queue->removed_count;
return;
}
}
++rx_queue->scatter_n;
if (rx_ev_cont)
return;
rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
if (likely(rx_ev_pkt_ok)) { if (likely(rx_ev_pkt_ok)) {
/* If packet is marked as OK and packet type is TCP/IP or /* If packet is marked as OK and packet type is TCP/IP or
...@@ -1048,7 +1104,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) ...@@ -1048,7 +1104,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
channel->irq_mod_score += 2; channel->irq_mod_score += 2;
/* Handle received packet */ /* Handle received packet */
efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags); efx_rx_packet(rx_queue,
rx_queue->removed_count & rx_queue->ptr_mask,
rx_queue->scatter_n, rx_ev_byte_cnt, flags);
rx_queue->removed_count += rx_queue->scatter_n;
rx_queue->scatter_n = 0;
} }
/* If this flush done event corresponds to a &struct efx_tx_queue, then /* If this flush done event corresponds to a &struct efx_tx_queue, then
......
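The reworked RX event path above is essentially a small state machine that accumulates fragments until the JUMBO_CONT flag clears. The following user-space sketch models that accumulation with hypothetical types and deliver()/discard() callbacks standing in for efx_rx_packet(); the truncation-versus-reset classification done by efx_handle_rx_bad_index() is deliberately omitted, so treat this as an illustration of the bookkeeping only.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified queue state and completion event. */
struct rxq {
	unsigned int removed_count;
	unsigned int scatter_n;
	unsigned int ptr_mask;
};

struct rx_event {
	unsigned int desc_ptr;
	bool sop;              /* start of packet */
	bool cont;             /* more fragments follow */
	unsigned int byte_cnt; /* total length, valid on the final fragment */
};

static void deliver(struct rxq *q, unsigned int n_frags, unsigned int bytes)
{
	printf("deliver %u fragment(s), %u byte(s), starting at index %u\n",
	       n_frags, bytes, q->removed_count & q->ptr_mask);
	q->removed_count += n_frags;
	q->scatter_n = 0;
}

static void discard(struct rxq *q, unsigned int n_frags)
{
	printf("discard %u fragment(s)\n", n_frags);
	q->removed_count += n_frags;
	q->scatter_n = 0;
}

/* Each event either extends the in-progress packet, completes it, or (on an
 * SOP/index mismatch) forces the pending fragments to be discarded first.
 */
static void handle_rx_event(struct rxq *q, const struct rx_event *ev)
{
	unsigned int expected = (q->removed_count + q->scatter_n) & q->ptr_mask;

	if (ev->desc_ptr != expected || ev->sop != (q->scatter_n == 0)) {
		if (q->scatter_n)
			discard(q, q->scatter_n);
		if (ev->desc_ptr != expected)
			return;         /* no new fragment to account for */
		if (!ev->sop) {
			discard(q, 1);  /* stray continuation fragment */
			return;
		}
	}

	q->scatter_n++;
	if (ev->cont)
		return;                 /* wait for the final fragment */

	deliver(q, q->scatter_n, ev->byte_cnt);
}

int main(void)
{
	struct rxq q = { .removed_count = 0, .scatter_n = 0, .ptr_mask = 511 };
	struct rx_event frag1 = { .desc_ptr = 0, .sop = true,  .cont = true,  .byte_cnt = 0 };
	struct rx_event frag2 = { .desc_ptr = 1, .sop = false, .cont = false, .byte_cnt = 3000 };

	handle_rx_event(&q, &frag1); /* accumulates, no output */
	handle_rx_event(&q, &frag2); /* delivers 2 fragments, 3000 bytes */
	return 0;
}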
...@@ -99,6 +99,9 @@ ...@@ -99,6 +99,9 @@
#define PTP_V2_VERSION_LENGTH 1 #define PTP_V2_VERSION_LENGTH 1
#define PTP_V2_VERSION_OFFSET 29 #define PTP_V2_VERSION_OFFSET 29
#define PTP_V2_UUID_LENGTH 8
#define PTP_V2_UUID_OFFSET 48
/* Although PTP V2 UUIDs are comprised of a ClockIdentity (8) and PortNumber (2), /* Although PTP V2 UUIDs are comprised of a ClockIdentity (8) and PortNumber (2),
* the MC only captures the last six bytes of the clock identity. These values * the MC only captures the last six bytes of the clock identity. These values
* reflect those, not the ones used in the standard. The standard permits * reflect those, not the ones used in the standard. The standard permits
...@@ -429,13 +432,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, ...@@ -429,13 +432,10 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
unsigned number_readings = (response_length / unsigned number_readings = (response_length /
MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN); MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN);
unsigned i; unsigned i;
unsigned min;
unsigned min_set = 0;
unsigned total; unsigned total;
unsigned ngood = 0; unsigned ngood = 0;
unsigned last_good = 0; unsigned last_good = 0;
struct efx_ptp_data *ptp = efx->ptp_data; struct efx_ptp_data *ptp = efx->ptp_data;
bool min_valid = false;
u32 last_sec; u32 last_sec;
u32 start_sec; u32 start_sec;
struct timespec delta; struct timespec delta;
...@@ -443,35 +443,17 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, ...@@ -443,35 +443,17 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
if (number_readings == 0) if (number_readings == 0)
return -EAGAIN; return -EAGAIN;
/* Find minimum value in this set of results, discarding clearly /* Read the set of results and increment stats for any results that
* erroneous results. * appear to be erroneous.
*/ */
for (i = 0; i < number_readings; i++) { for (i = 0; i < number_readings; i++) {
efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]); efx_ptp_read_timeset(synch_buf, &ptp->timeset[i]);
synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN; synch_buf += MC_CMD_PTP_OUT_SYNCHRONIZE_TIMESET_LEN;
if (ptp->timeset[i].window > SYNCHRONISATION_GRANULARITY_NS) {
if (min_valid) {
if (ptp->timeset[i].window < min_set)
min_set = ptp->timeset[i].window;
} else {
min_valid = true;
min_set = ptp->timeset[i].window;
}
}
}
if (min_valid) {
if (ptp->base_sync_valid && (min_set > ptp->base_sync_ns))
min = ptp->base_sync_ns;
else
min = min_set;
} else {
min = SYNCHRONISATION_GRANULARITY_NS;
} }
/* Discard excessively long synchronise durations. The MC times /* Find the last good host-MC synchronization result. The MC times
* when it finishes reading the host time so the corrected window * when it finishes reading the host time so the corrected window time
* time should be fairly constant for a given platform. * should be fairly constant for a given platform.
*/ */
total = 0; total = 0;
for (i = 0; i < number_readings; i++) for (i = 0; i < number_readings; i++)
...@@ -489,8 +471,8 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf, ...@@ -489,8 +471,8 @@ static int efx_ptp_process_times(struct efx_nic *efx, u8 *synch_buf,
if (ngood == 0) { if (ngood == 0) {
netif_warn(efx, drv, efx->net_dev, netif_warn(efx, drv, efx->net_dev,
"PTP no suitable synchronisations %dns %dns\n", "PTP no suitable synchronisations %dns\n",
ptp->base_sync_ns, min_set); ptp->base_sync_ns);
return -EAGAIN; return -EAGAIN;
} }
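The selection loop that replaces the minimum-window search is not fully visible in this hunk, so the following is a speculative user-space sketch of the idea described in the new comment: scan the readings and keep the index of the last one whose corrected window looks plausible. The timeset fields, the correction (window minus MC wait time), and the threshold are all assumptions for illustration, not the driver's actual logic.

#include <stdio.h>

/* Hypothetical per-reading data: total host window and MC turnaround time. */
struct timeset {
	unsigned int window_ns; /* host send -> host receive window */
	unsigned int wait_ns;   /* time the MC spent before replying */
};

/* Return the index of the last reading whose corrected window is within the
 * given bound, or -1 if none qualify (mirroring the "no suitable
 * synchronisations" warning path).
 */
static int last_good_reading(const struct timeset *ts, unsigned int n,
			     unsigned int max_corrected_ns)
{
	int last_good = -1;
	unsigned int i;

	for (i = 0; i < n; i++) {
		unsigned int corrected = ts[i].window_ns - ts[i].wait_ns;

		if (corrected <= max_corrected_ns)
			last_good = (int)i;
	}
	return last_good;
}

int main(void)
{
	const struct timeset ts[] = {
		{ 12000, 2000 }, { 9000, 1000 }, { 40000, 1000 },
	};

	printf("last good index: %d\n", last_good_reading(ts, 3, 10000)); /* 1 */
	return 0;
}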
...@@ -1006,43 +988,53 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb) ...@@ -1006,43 +988,53 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
* the receive timestamp from the MC - this will probably occur after the * the receive timestamp from the MC - this will probably occur after the
* packet arrival because of the processing in the MC. * packet arrival because of the processing in the MC.
*/ */
static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
{ {
struct efx_nic *efx = channel->efx; struct efx_nic *efx = channel->efx;
struct efx_ptp_data *ptp = efx->ptp_data; struct efx_ptp_data *ptp = efx->ptp_data;
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb; struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
u8 *data; u8 *match_data_012, *match_data_345;
unsigned int version; unsigned int version;
match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS); match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
/* Correct version? */ /* Correct version? */
if (ptp->mode == MC_CMD_PTP_MODE_V1) { if (ptp->mode == MC_CMD_PTP_MODE_V1) {
if (skb->len < PTP_V1_MIN_LENGTH) { if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
netif_receive_skb(skb); return false;
return;
} }
version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]); version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
if (version != PTP_VERSION_V1) { if (version != PTP_VERSION_V1) {
netif_receive_skb(skb); return false;
return;
} }
/* PTP V1 uses all six bytes of the UUID to match the packet
* to the timestamp
*/
match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
} else { } else {
if (skb->len < PTP_V2_MIN_LENGTH) { if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
netif_receive_skb(skb); return false;
return;
} }
version = skb->data[PTP_V2_VERSION_OFFSET]; version = skb->data[PTP_V2_VERSION_OFFSET];
BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2);
BUILD_BUG_ON(PTP_V1_UUID_OFFSET != PTP_V2_MC_UUID_OFFSET);
BUILD_BUG_ON(PTP_V1_UUID_LENGTH != PTP_V2_MC_UUID_LENGTH);
BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) { if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
netif_receive_skb(skb); return false;
return; }
/* The original V2 implementation uses bytes 2-7 of
* the UUID to match the packet to the timestamp. This
* discards two of the bytes of the MAC address used
* to create the UUID (SF bug 33070). The PTP V2
* enhanced mode fixes this issue and uses bytes 0-2
* and byte 5-7 of the UUID.
*/
match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
if (ptp->mode == MC_CMD_PTP_MODE_V2) {
match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
} else {
match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
} }
} }
...@@ -1056,14 +1048,19 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) ...@@ -1056,14 +1048,19 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
timestamps = skb_hwtstamps(skb); timestamps = skb_hwtstamps(skb);
memset(timestamps, 0, sizeof(*timestamps)); memset(timestamps, 0, sizeof(*timestamps));
/* We expect the sequence number to be in the same position in
* the packet for PTP V1 and V2
*/
BUILD_BUG_ON(PTP_V1_SEQUENCE_OFFSET != PTP_V2_SEQUENCE_OFFSET);
BUILD_BUG_ON(PTP_V1_SEQUENCE_LENGTH != PTP_V2_SEQUENCE_LENGTH);
/* Extract UUID/Sequence information */ /* Extract UUID/Sequence information */
data = skb->data + PTP_V1_UUID_OFFSET; match->words[0] = (match_data_012[0] |
match->words[0] = (data[0] | (match_data_012[1] << 8) |
(data[1] << 8) | (match_data_012[2] << 16) |
(data[2] << 16) | (match_data_345[0] << 24));
(data[3] << 24)); match->words[1] = (match_data_345[1] |
match->words[1] = (data[4] | (match_data_345[2] << 8) |
(data[5] << 8) |
(skb->data[PTP_V1_SEQUENCE_OFFSET + (skb->data[PTP_V1_SEQUENCE_OFFSET +
PTP_V1_SEQUENCE_LENGTH - 1] << PTP_V1_SEQUENCE_LENGTH - 1] <<
16)); 16));
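To make the byte packing above concrete, here is a small user-space sketch that folds the six matched UUID bytes and the low byte of the sequence number into the two 32-bit match words using the same shifts. The buffer layout and example values are hypothetical; only the packing mirrors the code.

#include <stdint.h>
#include <stdio.h>

/* Pack six UUID match bytes plus the low sequence byte into two 32-bit words,
 * as used to pair an RX packet with its MC-supplied timestamp.
 */
static void pack_match_words(const uint8_t uuid6[6], uint8_t seq_low,
			     uint32_t words[2])
{
	words[0] = (uint32_t)uuid6[0] |
		   ((uint32_t)uuid6[1] << 8) |
		   ((uint32_t)uuid6[2] << 16) |
		   ((uint32_t)uuid6[3] << 24);
	words[1] = (uint32_t)uuid6[4] |
		   ((uint32_t)uuid6[5] << 8) |
		   ((uint32_t)seq_low << 16);
}

int main(void)
{
	const uint8_t uuid6[6] = { 0x00, 0x0f, 0x53, 0x01, 0x02, 0x03 };
	uint32_t words[2];

	pack_match_words(uuid6, 0x2a, words);
	printf("words: %08x %08x\n", words[0], words[1]); /* 01530f00 002a0302 */
	return 0;
}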
...@@ -1073,6 +1070,8 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb) ...@@ -1073,6 +1070,8 @@ static void efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
skb_queue_tail(&ptp->rxq, skb); skb_queue_tail(&ptp->rxq, skb);
queue_work(ptp->workwq, &ptp->work); queue_work(ptp->workwq, &ptp->work);
return true;
} }
/* Transmit a PTP packet. This has to be transmitted by the MC /* Transmit a PTP packet. This has to be transmitted by the MC
...@@ -1167,7 +1166,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) ...@@ -1167,7 +1166,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
* timestamped * timestamped
*/ */
init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT; init->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
new_mode = MC_CMD_PTP_MODE_V2; new_mode = MC_CMD_PTP_MODE_V2_ENHANCED;
enable_wanted = true; enable_wanted = true;
break; break;
case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_EVENT:
...@@ -1186,7 +1185,14 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init) ...@@ -1186,7 +1185,14 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct hwtstamp_config *init)
if (init->tx_type != HWTSTAMP_TX_OFF) if (init->tx_type != HWTSTAMP_TX_OFF)
enable_wanted = true; enable_wanted = true;
/* Old versions of the firmware do not support the improved
* UUID filtering option (SF bug 33070). If the firmware does
* not accept the enhanced mode, fall back to the standard PTP
* v2 UUID filtering.
*/
rc = efx_ptp_change_mode(efx, enable_wanted, new_mode); rc = efx_ptp_change_mode(efx, enable_wanted, new_mode);
if ((rc != 0) && (new_mode == MC_CMD_PTP_MODE_V2_ENHANCED))
rc = efx_ptp_change_mode(efx, enable_wanted, MC_CMD_PTP_MODE_V2);
if (rc != 0) if (rc != 0)
return rc; return rc;
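The mode negotiation above follows a simple try-then-fall-back pattern: request the enhanced UUID-filtering mode first, and retry with plain PTP V2 filtering if the firmware rejects it. A hedged sketch of that pattern follows, with try_set_mode() and the mode enum as hypothetical stand-ins for the efx_ptp_change_mode() MCDI call.

#include <stdio.h>

/* Hypothetical mode identifiers and firmware call. */
enum ptp_mode { PTP_MODE_V2, PTP_MODE_V2_ENHANCED };

static int try_set_mode(enum ptp_mode mode)
{
	/* Pretend old firmware rejects the enhanced mode (SF bug 33070). */
	return mode == PTP_MODE_V2_ENHANCED ? -1 : 0;
}

/* Prefer the enhanced UUID filtering, but fall back to standard PTP V2
 * filtering when the firmware does not support it.
 */
static int set_ptp_mode(enum ptp_mode wanted)
{
	int rc = try_set_mode(wanted);

	if (rc != 0 && wanted == PTP_MODE_V2_ENHANCED)
		rc = try_set_mode(PTP_MODE_V2);
	return rc;
}

int main(void)
{
	printf("rc = %d\n", set_ptp_mode(PTP_MODE_V2_ENHANCED)); /* 0 after fallback */
	return 0;
}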
......
This diff has been collapsed.
...@@ -202,7 +202,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests) ...@@ -202,7 +202,7 @@ static int siena_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
static enum reset_type siena_map_reset_reason(enum reset_type reason) static enum reset_type siena_map_reset_reason(enum reset_type reason)
{ {
return RESET_TYPE_ALL; return RESET_TYPE_RECOVER_OR_ALL;
} }
static int siena_map_reset_flags(u32 *flags) static int siena_map_reset_flags(u32 *flags)
...@@ -245,6 +245,22 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method) ...@@ -245,6 +245,22 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
return efx_mcdi_reset_port(efx); return efx_mcdi_reset_port(efx);
} }
#ifdef CONFIG_EEH
/* When a PCI device is isolated from the bus, a subsequent MMIO read is
* required for the kernel EEH mechanisms to notice. As the Solarflare driver
* was written to minimise MMIO reads (for latency), a periodic call to check
* the EEH status of the device is required so that device recovery can happen
* in a timely fashion.
*/
static void siena_monitor(struct efx_nic *efx)
{
struct eeh_dev *eehdev =
of_node_to_eeh_dev(pci_device_to_OF_node(efx->pci_dev));
eeh_dev_check_failure(eehdev);
}
#endif
static int siena_probe_nvconfig(struct efx_nic *efx) static int siena_probe_nvconfig(struct efx_nic *efx)
{ {
u32 caps = 0; u32 caps = 0;
...@@ -398,6 +414,8 @@ static int siena_init_nic(struct efx_nic *efx) ...@@ -398,6 +414,8 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_INSRT_HDR, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_HASH_ALG, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1); EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_IP_HASH, 1);
EFX_SET_OWORD_FIELD(temp, FRF_BZ_RX_USR_BUF_SIZE,
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG); efx_writeo(efx, &temp, FR_AZ_RX_CFG);
/* Set hash key for IPv4 */ /* Set hash key for IPv4 */
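The >> 5 in the hunk above converts a byte count into 32-byte units, which is how the FRF_BZ_RX_USR_BUF_SIZE field appears to be encoded. A trivial sketch of that conversion, with a purely illustrative buffer size (EFX_RX_USR_BUF_SIZE's real value is defined elsewhere in the driver):

#include <stdio.h>

/* Hypothetical usable RX buffer size in bytes; illustrative value only. */
#define EXAMPLE_RX_USR_BUF_SIZE 1824

int main(void)
{
	/* Shift by 5 divides by 32, yielding the register field value. */
	printf("field value = %u\n", EXAMPLE_RX_USR_BUF_SIZE >> 5); /* 57 */
	return 0;
}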
...@@ -665,7 +683,11 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -665,7 +683,11 @@ const struct efx_nic_type siena_a0_nic_type = {
.init = siena_init_nic, .init = siena_init_nic,
.dimension_resources = siena_dimension_resources, .dimension_resources = siena_dimension_resources,
.fini = efx_port_dummy_op_void, .fini = efx_port_dummy_op_void,
#ifdef CONFIG_EEH
.monitor = siena_monitor,
#else
.monitor = NULL, .monitor = NULL,
#endif
.map_reset_reason = siena_map_reset_reason, .map_reset_reason = siena_map_reset_reason,
.map_reset_flags = siena_map_reset_flags, .map_reset_flags = siena_map_reset_flags,
.reset = siena_reset_hw, .reset = siena_reset_hw,
...@@ -698,6 +720,7 @@ const struct efx_nic_type siena_a0_nic_type = { ...@@ -698,6 +720,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
.rx_buffer_hash_size = 0x10, .rx_buffer_hash_size = 0x10,
.rx_buffer_padding = 0, .rx_buffer_padding = 0,
.can_rx_scatter = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX, .max_interrupt_mode = EFX_INT_MODE_MSIX,
.phys_addr_channels = 32, /* Hardware limit is 64, but the legacy .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
* interrupt handler only supports 32 * interrupt handler only supports 32
......