Commit b1840060 authored by: David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2014-09-23

This patch series adds support for the FM10000 Ethernet switch host
interface.  The Intel FM10000 Ethernet Switch is a 48-port Ethernet switch
supporting both Ethernet ports and PCI Express host interfaces.  The fm10k
driver provides support for the host interface portion of the switch, both
PF and VF.

As the host interfaces are directly connected to the switch this results in
some significant differences versus a standard network driver.  For example
there is no PHY or MII on the device.  Since packets are delivered directly
from the switch to the host interface these are unnecessary.  Otherwise most
of the functionality is very similar to our other network drivers such as
ixgbe or igb.  For example we support all the standard network offloads,
jumbo frames, SR-IOV (64 VFS), PTP, and some VXLAN and NVGRE offloads.

v2: converted dev_consume_skb_any() to dev_kfree_skb_any()
    fix up PTP code based on feedback from the community
v3: converted the use of smp_mb__before_clear_bit() to smp_mb__before_atomic()
    added vmalloc header to patch 15
    added prefetch header to patch 16
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
......@@ -300,4 +300,23 @@ config I40EVF
will be called i40evf. MSI-X interrupt support is required
for this driver to work correctly.
config FM10K
tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support"
default n
depends on PCI_MSI
---help---
This driver supports Intel(R) FM10000 Ethernet Switch Host
Interface. For more information on how to identify your adapter,
go to the Adapter & Driver ID Guide at:
<http://support.intel.com/support/network/sb/CS-008441.htm>
For general information and support, go to the Intel support
website at:
<http://support.intel.com>
To compile this driver as a module, choose M here. The module
will be called fm10k. MSI-X interrupt support is required
endif # NET_VENDOR_INTEL
......@@ -12,3 +12,4 @@ obj-$(CONFIG_IXGBEVF) += ixgbevf/
obj-$(CONFIG_I40E) += i40e/
obj-$(CONFIG_IXGB) += ixgb/
obj-$(CONFIG_I40EVF) += i40evf/
obj-$(CONFIG_FM10K) += fm10k/
################################################################################
#
# Intel Ethernet Switch Host Interface Driver
# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
# version 2, as published by the Free Software Foundation.
#
# This program is distributed in the hope it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
# Contact Information:
# e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
# Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
#
################################################################################
#
# Makefile for the Intel(R) FM10000 Ethernet Switch Host Interface driver
#
obj-$(CONFIG_FM10K) += fm10k.o
fm10k-objs := fm10k_main.o fm10k_common.o fm10k_pci.o \
fm10k_netdev.o fm10k_ethtool.o fm10k_pf.o fm10k_vf.o \
fm10k_mbx.o fm10k_iov.o fm10k_tlv.o \
fm10k_debugfs.o fm10k_ptp.o fm10k_dcbnl.o
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_H_
#define _FM10K_H_

#include <linux/types.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
#include <linux/clocksource.h>
#include <linux/ptp_clock_kernel.h>

#include "fm10k_pf.h"
#include "fm10k_vf.h"

#define FM10K_MAX_JUMBO_FRAME_SIZE 15358 /* Maximum supported size 15K */

/* driver-wide queue limit; the PF exposes the larger queue count */
#define MAX_QUEUES FM10K_MAX_QUEUES_PF

/* Rx/Tx descriptor ring size limits and defaults (counts of descriptors) */
#define FM10K_MIN_RXD 128
#define FM10K_MAX_RXD 4096
#define FM10K_DEFAULT_RXD 256

#define FM10K_MIN_TXD 128
#define FM10K_MAX_TXD 4096
#define FM10K_DEFAULT_TXD 256
/* budget of Tx descriptors cleaned per NAPI poll invocation */
#define FM10K_DEFAULT_TX_WORK 256

#define FM10K_RXBUFFER_256 256
#define FM10K_RXBUFFER_16384 16384
#define FM10K_RX_HDR_LEN FM10K_RXBUFFER_256
/* Rx buffers are carved from pages: half a page on typical systems, but
 * capped at 16K on architectures with very large pages
 */
#if PAGE_SIZE <= FM10K_RXBUFFER_16384
#define FM10K_RX_BUFSZ (PAGE_SIZE / 2)
#else
#define FM10K_RX_BUFSZ FM10K_RXBUFFER_16384
#endif

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define FM10K_RX_BUFFER_WRITE 16 /* Must be power of 2 */

/* maximum number of macvlan stations supported for L2 acceleration */
#define FM10K_MAX_STATIONS 63
/* L2 acceleration (macvlan offload) table.  Freed via RCU, so readers may
 * walk it locklessly while it is being replaced.
 */
struct fm10k_l2_accel {
	int size;			/* allocated entries in macvlan[] */
	u16 count;			/* entries currently in use */
	u16 dglort;			/* base destination glort for group */
	struct rcu_head rcu;		/* deferred free after grace period */
	struct net_device *macvlan[0];	/* trailing array of offloaded netdevs */
};

/* per-ring state bits stored in fm10k_ring.state */
enum fm10k_ring_state_t {
	__FM10K_TX_DETECT_HANG,
	__FM10K_HANG_CHECK_ARMED,
};

#define check_for_tx_hang(ring) \
	test_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__FM10K_TX_DETECT_HANG, &(ring)->state)

/* bookkeeping for one Tx descriptor's skb and DMA mapping */
struct fm10k_tx_buffer {
	struct fm10k_tx_desc *next_to_watch;	/* last desc of this packet */
	struct sk_buff *skb;
	unsigned int bytecount;			/* bytes counted for BQL/stats */
	u16 gso_segs;				/* segments produced by TSO */
	u16 tx_flags;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

/* bookkeeping for one Rx descriptor's page-based buffer */
struct fm10k_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	u32 page_offset;	/* offset of the active half of the page */
};

/* packet/byte counters shared by Tx and Rx rings */
struct fm10k_queue_stats {
	u64 packets;
	u64 bytes;
};

struct fm10k_tx_queue_stats {
	u64 restart_queue;
	u64 csum_err;
	u64 tx_busy;
	u64 tx_done_old;
};

struct fm10k_rx_queue_stats {
	u64 alloc_failed;
	u64 csum_err;
	u64 errors;
};
/* descriptor ring descriptor; one per Tx or Rx queue.  Node-aligned so that
 * hot fast-path fields stay on their own cache lines.
 */
struct fm10k_ring {
	struct fm10k_q_vector *q_vector;/* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct device *dev;		/* device for DMA mapping */
	struct fm10k_l2_accel __rcu *l2_accel;	/* L2 acceleration list */
	void *desc;			/* descriptor ring memory */
	union {
		/* buffer bookkeeping array; which member is valid depends on
		 * whether this is a Tx or an Rx ring
		 */
		struct fm10k_tx_buffer *tx_buffer;
		struct fm10k_rx_buffer *rx_buffer;
	};
	u32 __iomem *tail;		/* mapped tail register for this ring */
	unsigned long state;		/* __FM10K_* ring state bits */
	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u8 queue_index;			/* needed for queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	u8 qos_pc;			/* priority class of queue */
	u16 vid;			/* default vlan ID of queue */
	u16 count;			/* amount of descriptors */

	u16 next_to_alloc;
	u16 next_to_use;
	u16 next_to_clean;

	struct fm10k_queue_stats stats;
	struct u64_stats_sync syncp;	/* protects 64-bit stats on 32-bit */
	union {
		/* Tx */
		struct fm10k_tx_queue_stats tx_stats;
		/* Rx */
		struct {
			struct fm10k_rx_queue_stats rx_stats;
			struct sk_buff *skb;	/* partial Rx frame in progress */
		};
	};
} ____cacheline_internodealigned_in_smp;
/* group of rings (all Tx or all Rx) serviced by a single interrupt vector */
struct fm10k_ring_container {
	struct fm10k_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u16 itr;			/* interrupt throttle rate value */
	u8 count;			/* total number of rings in vector */
};

/* interrupt throttle rate (ITR) values and flags */
#define FM10K_ITR_MAX		0x0FFF	/* maximum value for ITR */
#define FM10K_ITR_10K		100	/* 100us */
#define FM10K_ITR_20K		50	/* 50us */
#define FM10K_ITR_ADAPTIVE	0x8000	/* adaptive interrupt moderation flag */

#define FM10K_ITR_ENABLE	(FM10K_ITR_AUTOMASK | FM10K_ITR_MASK_CLEAR)

/* map a Tx ring back to its netdev_queue for flow-control/BQL purposes */
static inline struct netdev_queue *txring_txq(const struct fm10k_ring *ring)
{
	return &ring->netdev->_tx[ring->queue_index];
}

/* iterator for handling rings in ring container; walks the rings in
 * reverse order, from the last ring down to the first
 */
#define fm10k_for_each_ring(pos, head) \
	for (pos = &(head).ring[(head).count]; (--pos) >= (head).ring;)

#define MAX_Q_VECTORS 256
#define MIN_Q_VECTORS 1

/* non-queue vectors; PF and VF share the same single mailbox vector */
enum fm10k_non_q_vectors {
	FM10K_MBX_VECTOR,
#define NON_Q_VECTORS_VF NON_Q_VECTORS_PF
	NON_Q_VECTORS_PF
};

#define NON_Q_VECTORS(hw) (((hw)->mac.type == fm10k_mac_pf) ? \
			   NON_Q_VECTORS_PF : \
			   NON_Q_VECTORS_VF)
#define MIN_MSIX_COUNT(hw) (MIN_Q_VECTORS + NON_Q_VECTORS(hw))
/* one MSI-X queue vector: a NAPI context plus its Tx and Rx ring groups */
struct fm10k_q_vector {
	struct fm10k_intfc *interface;
	u32 __iomem *itr;	/* pointer to ITR register for this vector */
	u16 v_idx;		/* index of q_vector within interface array */

	struct fm10k_ring_container rx, tx;

	struct napi_struct napi;
	char name[IFNAMSIZ + 9];	/* irq name, e.g. "<ifname>-TxRx-<n>" */

#ifdef CONFIG_DEBUG_FS
	struct dentry *dbg_q_vector;
#endif /* CONFIG_DEBUG_FS */
	struct rcu_head rcu;	/* to avoid race with update stats on free */

	/* for dynamic allocation of rings associated with this q_vector */
	struct fm10k_ring ring[0] ____cacheline_internodealigned_in_smp;
};

enum fm10k_ring_f_enum {
	RING_F_RSS,
	RING_F_QOS,
	RING_F_ARRAY_SIZE  /* must be last in enum set */
};

/* describes how queues are distributed for one feature (RSS or QOS) */
struct fm10k_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
};

/* SR-IOV state: per-VF info plus round-robin mailbox servicing position */
struct fm10k_iov_data {
	unsigned int num_vfs;
	unsigned int next_vf_mbx;	/* next VF mailbox to service */
	struct rcu_head rcu;		/* deferred free after grace period */
	struct fm10k_vf_info vf_info[0];	/* trailing per-VF array */
};

#define fm10k_vxlan_port_for_each(vp, intfc) \
	list_for_each_entry(vp, &(intfc)->vxlan_port, list)

/* one VXLAN UDP port the hardware is offloading */
struct fm10k_vxlan_port {
	struct list_head	list;
	sa_family_t		sa_family;
	__be16			port;	/* UDP port, network byte order */
};
/* per-interface (per-PCI-function) driver private data */
struct fm10k_intfc {
	/* bitmap of VLAN IDs currently active on this interface */
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;
	struct fm10k_l2_accel *l2_accel; /* pointer to L2 acceleration list */
	struct pci_dev *pdev;
	unsigned long state;		/* __FM10K_* interface state bits */

	u32 flags;
#define FM10K_FLAG_RESET_REQUESTED		(u32)(1 << 0)
#define FM10K_FLAG_RSS_FIELD_IPV4_UDP		(u32)(1 << 1)
#define FM10K_FLAG_RSS_FIELD_IPV6_UDP		(u32)(1 << 2)
#define FM10K_FLAG_RX_TS_ENABLED		(u32)(1 << 3)
#define FM10K_FLAG_SWPRI_CONFIG		(u32)(1 << 4)
	int xcast_mode;			/* current uni/multi/promisc mode */

	/* Tx fast path data */
	int num_tx_queues;
	u16 tx_itr;

	/* Rx fast path data */
	int num_rx_queues;
	u16 rx_itr;

	/* TX */
	struct fm10k_ring *tx_ring[MAX_QUEUES] ____cacheline_aligned_in_smp;

	/* software-accumulated Tx counters */
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_csum_errors;
	u64 alloc_failed;
	u64 rx_csum_errors;
	u64 rx_errors;

	/* NIC-level counters aggregated from hardware statistics */
	u64 tx_bytes_nic;
	u64 tx_packets_nic;
	u64 rx_bytes_nic;
	u64 rx_packets_nic;
	u64 rx_drops_nic;
	u64 rx_overrun_pf;
	u64 rx_overrun_vf;
	u32 tx_timeout_count;

	/* RX */
	struct fm10k_ring *rx_ring[MAX_QUEUES];

	/* Queueing vectors */
	struct fm10k_q_vector *q_vector[MAX_Q_VECTORS];
	struct msix_entry *msix_entries;
	int num_q_vectors;	/* current number of q_vectors for device */
	struct fm10k_ring_feature ring_feature[RING_F_ARRAY_SIZE];

	/* SR-IOV information management structure */
	struct fm10k_iov_data *iov_data;

	struct fm10k_hw_stats stats;
	struct fm10k_hw hw;
	u32 __iomem *uc_addr;	/* mapped BAR for per-function registers */
	u32 __iomem *sw_addr;	/* mapped BAR for switch registers */
	u16 msg_enable;		/* netif message level bitmask */
	u16 tx_ring_count;	/* descriptors per Tx ring */
	u16 rx_ring_count;	/* descriptors per Rx ring */

	/* periodic service task (stats, hang check, reset handling) */
	struct timer_list service_timer;
	struct work_struct service_task;
	unsigned long next_stats_update;
	unsigned long next_tx_hang_check;
	unsigned long last_reset;
	unsigned long link_down_event;
	bool host_ready;	/* switch manager reports host interface up */

	/* RSS redirection table and random secret key */
	u32 reta[FM10K_RETA_SIZE];
	u32 rssrk[FM10K_RSSRK_SIZE];

	/* VXLAN port tracking information */
	struct list_head vxlan_port;

#ifdef CONFIG_DEBUG_FS
	struct dentry *dbg_intfc;

#endif /* CONFIG_DEBUG_FS */
	/* PTP / hardware timestamping state */
	struct ptp_clock_info ptp_caps;
	struct ptp_clock *ptp_clock;

	struct sk_buff_head ts_tx_skb_queue;	/* skbs awaiting Tx timestamps */
	u32 tx_hwtstamp_timeouts;

	struct hwtstamp_config ts_config;
	/* We are unable to actually adjust the clock beyond the frequency
	 * value.  Once the clock is started there is no resetting it.  As
	 * such we maintain a separate offset from the actual hardware clock
	 * to allow for offset adjustment.
	 */
	s64 ptp_adjust;
	rwlock_t systime_lock;	/* protects ptp_adjust against readers */
#ifdef CONFIG_DCB
	u8 pfc_en;	/* priority flow control enable bitmap */
#endif
	u8 rx_pause;

	/* GLORT resources in use by PF */
	u16 glort;
	u16 glort_count;

	/* VLAN ID for updating multicast/unicast lists */
	u16 vid;
};
/* interface-wide state bits stored in fm10k_intfc.state */
enum fm10k_state_t {
	__FM10K_RESETTING,
	__FM10K_DOWN,
	__FM10K_SERVICE_SCHED,
	__FM10K_SERVICE_DISABLE,
	__FM10K_MBX_LOCK,
	__FM10K_LINK_DOWN,
};

/* acquire the mailbox lock; spins rather than sleeps because callers such
 * as ndo_set_rx_mode may be running in atomic context
 */
static inline void fm10k_mbx_lock(struct fm10k_intfc *interface)
{
	/* busy loop if we cannot obtain the lock as some calls
	 * such as ndo_set_rx_mode may be made in atomic context
	 */
	while (test_and_set_bit(__FM10K_MBX_LOCK, &interface->state))
		udelay(20);
}

/* release the mailbox lock taken by fm10k_mbx_lock/fm10k_mbx_trylock */
static inline void fm10k_mbx_unlock(struct fm10k_intfc *interface)
{
	/* flush memory to make sure state is correct */
	smp_mb__before_atomic();
	clear_bit(__FM10K_MBX_LOCK, &interface->state);
}

/* non-blocking attempt to take the mailbox lock; returns nonzero on success */
static inline int fm10k_mbx_trylock(struct fm10k_intfc *interface)
{
	return !test_and_set_bit(__FM10K_MBX_LOCK, &interface->state);
}

/* fm10k_test_staterr - test bits in Rx descriptor status and error fields */
static inline __le32 fm10k_test_staterr(union fm10k_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	/* staterr is little endian; convert the mask rather than the field */
	return rx_desc->d.staterr & cpu_to_le32(stat_err_bits);
}
/* fm10k_desc_unused - number of descriptors still available to the driver */
static inline u16 fm10k_desc_unused(struct fm10k_ring *ring)
{
	/* signed math so a wrapped ring shows up as a negative value */
	s16 avail = ring->next_to_clean - ring->next_to_use - 1;

	if (likely(avail < 0))
		avail += ring->count;

	return avail;
}
/* helpers to address descriptor i within a ring's descriptor memory */
#define FM10K_TX_DESC(R, i)	\
	(&(((struct fm10k_tx_desc *)((R)->desc))[i]))
#define FM10K_RX_DESC(R, i)	\
	 (&(((union fm10k_rx_desc *)((R)->desc))[i]))

/* one Tx descriptor can carry at most 2^14 bytes of data */
#define FM10K_MAX_TXD_PWR	14
#define FM10K_MAX_DATA_PER_TXD	(1 << FM10K_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), FM10K_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)

enum fm10k_tx_flags {
	/* Tx offload flags */
	FM10K_TX_FLAGS_CSUM	= 0x01,
};

/* This structure is stored as little endian values as that is the native
 * format of the Rx descriptor.  The ordering of these fields is reversed
 * from the actual ftag header to allow for a single bswap to take care
 * of placing all of the values in network order
 */
union fm10k_ftag_info {
	__le64 ftag;
	struct {
		/* dglort and sglort combined into a single 32bit desc read */
		__le32 glort;
		/* upper 16 bits of vlan are reserved 0 for swpri_type_user */
		__le32 vlan;
	} d;
	struct {
		__le16 dglort;
		__le16 sglort;
		__le16 vlan;
		__le16 swpri_type_user;
	} w;
};

/* driver-private data stored in skb->cb; accessed via FM10K_CB() */
struct fm10k_cb {
	union {
		__le64 tstamp;			/* Rx hardware timestamp */
		unsigned long ts_tx_timeout;	/* Tx timestamp deadline */
	};
	union fm10k_ftag_info fi;
};

#define FM10K_CB(skb) ((struct fm10k_cb *)(skb)->cb)
/* Function prototypes shared between the fm10k driver's source files,
 * grouped by the file that provides each set.
 */

/* main */
extern char fm10k_driver_name[];
extern const char fm10k_driver_version[];
int fm10k_init_queueing_scheme(struct fm10k_intfc *interface);
void fm10k_clear_queueing_scheme(struct fm10k_intfc *interface);
netdev_tx_t fm10k_xmit_frame_ring(struct sk_buff *skb,
				  struct fm10k_ring *tx_ring);
void fm10k_tx_timeout_reset(struct fm10k_intfc *interface);
bool fm10k_check_tx_hang(struct fm10k_ring *tx_ring);
void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count);

/* PCI */
void fm10k_mbx_free_irq(struct fm10k_intfc *);
int fm10k_mbx_request_irq(struct fm10k_intfc *);
void fm10k_qv_free_irq(struct fm10k_intfc *interface);
int fm10k_qv_request_irq(struct fm10k_intfc *interface);
int fm10k_register_pci_driver(void);
void fm10k_unregister_pci_driver(void);
void fm10k_up(struct fm10k_intfc *interface);
void fm10k_down(struct fm10k_intfc *interface);
void fm10k_update_stats(struct fm10k_intfc *interface);
void fm10k_service_event_schedule(struct fm10k_intfc *interface);
void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);

/* Netdev */
struct net_device *fm10k_alloc_netdev(void);
int fm10k_setup_rx_resources(struct fm10k_ring *);
int fm10k_setup_tx_resources(struct fm10k_ring *);
void fm10k_free_rx_resources(struct fm10k_ring *);
void fm10k_free_tx_resources(struct fm10k_ring *);
void fm10k_clean_all_rx_rings(struct fm10k_intfc *);
void fm10k_clean_all_tx_rings(struct fm10k_intfc *);
void fm10k_unmap_and_free_tx_resource(struct fm10k_ring *,
				      struct fm10k_tx_buffer *);
void fm10k_restore_rx_state(struct fm10k_intfc *);
void fm10k_reset_rx_state(struct fm10k_intfc *);
int fm10k_setup_tc(struct net_device *dev, u8 tc);
int fm10k_open(struct net_device *netdev);
int fm10k_close(struct net_device *netdev);

/* Ethtool */
void fm10k_set_ethtool_ops(struct net_device *dev);

/* IOV */
s32 fm10k_iov_event(struct fm10k_intfc *interface);
s32 fm10k_iov_mbx(struct fm10k_intfc *interface);
void fm10k_iov_suspend(struct pci_dev *pdev);
int fm10k_iov_resume(struct pci_dev *pdev);
void fm10k_iov_disable(struct pci_dev *pdev);
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs);
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid);
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac);
int fm10k_ndo_set_vf_vlan(struct net_device *netdev,
			  int vf_idx, u16 vid, u8 qos);
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int rate,
			int unused);
int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi);

/* DebugFS */
#ifdef CONFIG_DEBUG_FS
void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector);
void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector);
void fm10k_dbg_intfc_init(struct fm10k_intfc *interface);
void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface);
void fm10k_dbg_init(void);
void fm10k_dbg_exit(void);
#else
/* no-op stubs keep callers free of #ifdef clutter when debugfs is off */
static inline void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector) {}
static inline void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector) {}
static inline void fm10k_dbg_intfc_init(struct fm10k_intfc *interface) {}
static inline void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface) {}
static inline void fm10k_dbg_init(void) {}
static inline void fm10k_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */

/* Time Stamping */
void fm10k_systime_to_hwtstamp(struct fm10k_intfc *interface,
			       struct skb_shared_hwtstamps *hwtstamp,
			       u64 systime);
void fm10k_ts_tx_enqueue(struct fm10k_intfc *interface, struct sk_buff *skb);
void fm10k_ts_tx_hwtstamp(struct fm10k_intfc *interface, __le16 dglort,
			  u64 systime);
void fm10k_ts_reset(struct fm10k_intfc *interface);
void fm10k_ts_init(struct fm10k_intfc *interface);
void fm10k_ts_tx_subtask(struct fm10k_intfc *interface);
void fm10k_ptp_register(struct fm10k_intfc *interface);
void fm10k_ptp_unregister(struct fm10k_intfc *interface);
int fm10k_get_ts_config(struct net_device *netdev, struct ifreq *ifr);
int fm10k_set_ts_config(struct net_device *netdev, struct ifreq *ifr);

/* DCB */
void fm10k_dcbnl_set_ops(struct net_device *dev);
#endif /* _FM10K_H_ */
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "fm10k_common.h"
/**
 * fm10k_get_bus_info_generic - Generic set PCI bus info
 * @hw: pointer to hardware structure
 *
 * Gets the PCI bus info (speed, width, type) then calls helper function to
 * store this data within the fm10k_hw structure.
 *
 * Reads both the capability registers (maximum supported values, stored in
 * hw->bus_caps) and the status/control registers (currently negotiated
 * values, stored in hw->bus).  Always returns 0.
 **/
s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw)
{
	u16 link_cap, link_status, device_cap, device_control;

	/* Get the maximum link width and speed from PCIe config space */
	link_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_CAP);

	switch (link_cap & FM10K_PCIE_LINK_WIDTH) {
	case FM10K_PCIE_LINK_WIDTH_1:
		hw->bus_caps.width = fm10k_bus_width_pcie_x1;
		break;
	case FM10K_PCIE_LINK_WIDTH_2:
		hw->bus_caps.width = fm10k_bus_width_pcie_x2;
		break;
	case FM10K_PCIE_LINK_WIDTH_4:
		hw->bus_caps.width = fm10k_bus_width_pcie_x4;
		break;
	case FM10K_PCIE_LINK_WIDTH_8:
		hw->bus_caps.width = fm10k_bus_width_pcie_x8;
		break;
	default:
		hw->bus_caps.width = fm10k_bus_width_unknown;
		break;
	}

	switch (link_cap & FM10K_PCIE_LINK_SPEED) {
	case FM10K_PCIE_LINK_SPEED_2500:
		hw->bus_caps.speed = fm10k_bus_speed_2500;
		break;
	case FM10K_PCIE_LINK_SPEED_5000:
		hw->bus_caps.speed = fm10k_bus_speed_5000;
		break;
	case FM10K_PCIE_LINK_SPEED_8000:
		hw->bus_caps.speed = fm10k_bus_speed_8000;
		break;
	default:
		hw->bus_caps.speed = fm10k_bus_speed_unknown;
		break;
	}

	/* Get the PCIe maximum payload size for the PCIe function */
	device_cap = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CAP);

	switch (device_cap & FM10K_PCIE_DEV_CAP_PAYLOAD) {
	case FM10K_PCIE_DEV_CAP_PAYLOAD_128:
		hw->bus_caps.payload = fm10k_bus_payload_128;
		break;
	case FM10K_PCIE_DEV_CAP_PAYLOAD_256:
		hw->bus_caps.payload = fm10k_bus_payload_256;
		break;
	case FM10K_PCIE_DEV_CAP_PAYLOAD_512:
		hw->bus_caps.payload = fm10k_bus_payload_512;
		break;
	default:
		hw->bus_caps.payload = fm10k_bus_payload_unknown;
		break;
	}

	/* Get the negotiated link width and speed from PCIe config space */
	link_status = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_LINK_STATUS);

	switch (link_status & FM10K_PCIE_LINK_WIDTH) {
	case FM10K_PCIE_LINK_WIDTH_1:
		hw->bus.width = fm10k_bus_width_pcie_x1;
		break;
	case FM10K_PCIE_LINK_WIDTH_2:
		hw->bus.width = fm10k_bus_width_pcie_x2;
		break;
	case FM10K_PCIE_LINK_WIDTH_4:
		hw->bus.width = fm10k_bus_width_pcie_x4;
		break;
	case FM10K_PCIE_LINK_WIDTH_8:
		hw->bus.width = fm10k_bus_width_pcie_x8;
		break;
	default:
		hw->bus.width = fm10k_bus_width_unknown;
		break;
	}

	switch (link_status & FM10K_PCIE_LINK_SPEED) {
	case FM10K_PCIE_LINK_SPEED_2500:
		hw->bus.speed = fm10k_bus_speed_2500;
		break;
	case FM10K_PCIE_LINK_SPEED_5000:
		hw->bus.speed = fm10k_bus_speed_5000;
		break;
	case FM10K_PCIE_LINK_SPEED_8000:
		hw->bus.speed = fm10k_bus_speed_8000;
		break;
	default:
		hw->bus.speed = fm10k_bus_speed_unknown;
		break;
	}

	/* Get the negotiated PCIe maximum payload size for the PCIe function */
	device_control = fm10k_read_pci_cfg_word(hw, FM10K_PCIE_DEV_CTRL);

	switch (device_control & FM10K_PCIE_DEV_CTRL_PAYLOAD) {
	case FM10K_PCIE_DEV_CTRL_PAYLOAD_128:
		hw->bus.payload = fm10k_bus_payload_128;
		break;
	case FM10K_PCIE_DEV_CTRL_PAYLOAD_256:
		hw->bus.payload = fm10k_bus_payload_256;
		break;
	case FM10K_PCIE_DEV_CTRL_PAYLOAD_512:
		hw->bus.payload = fm10k_bus_payload_512;
		break;
	default:
		hw->bus.payload = fm10k_bus_payload_unknown;
		break;
	}

	return 0;
}
/**
 * fm10k_get_pcie_msix_count_generic - Read MSI-X vector count from hardware
 * @hw: pointer to hardware structure
 *
 * Pulls the MSI-X table size out of PCI config space and clamps it to the
 * driver maximum.  Returns the number of usable MSI-X vectors.
 **/
static u16 fm10k_get_pcie_msix_count_generic(struct fm10k_hw *hw)
{
	u16 table_size;

	/* table size field of the MSI-X message control word */
	table_size = fm10k_read_pci_cfg_word(hw, FM10K_PCI_MSIX_MSG_CTRL) &
		     FM10K_PCI_MSIX_MSG_CTRL_TBL_SZ_MASK;

	/* hardware reports the table size zero-based */
	table_size += 1;

	/* never report more vectors than the driver supports */
	return (table_size > FM10K_MAX_MSIX_VECTORS) ?
	       FM10K_MAX_MSIX_VECTORS : table_size;
}
/**
 * fm10k_get_invariants_generic - Inits constant values
 * @hw: pointer to the hardware structure
 *
 * Initialize the common invariants for the device.  Always returns 0.
 **/
s32 fm10k_get_invariants_generic(struct fm10k_hw *hw)
{
	/* start with an invalid GLORT map so stale lookups never match */
	hw->mac.dglort_map = FM10K_DGLORTMAP_NONE;

	/* record maximum number of MSI-X vectors */
	hw->mac.max_msix_vectors = fm10k_get_pcie_msix_count_generic(hw);

	return 0;
}
/**
* fm10k_start_hw_generic - Prepare hardware for Tx/Rx
* @hw: pointer to hardware structure
*
* This function sets the Tx ready flag to indicate that the Tx path has
* been initialized.
**/
s32 fm10k_start_hw_generic(struct fm10k_hw *hw)
{
/* set flag indicating we are beginning Tx */
hw->mac.tx_ready = true;
return 0;
}
/**
 * fm10k_disable_queues_generic - Stop Tx/Rx queues
 * @hw: pointer to hardware structure
 * @q_cnt: number of queues to be disabled
 *
 * Clears the enable bit on the first @q_cnt Tx/Rx queue pairs and then
 * polls until the hardware confirms each queue has quiesced.  Returns 0 on
 * success or FM10K_ERR_REQUESTS_PENDING if the timeout expires first.
 **/
s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt)
{
	u32 reg;
	u16 i, time;

	/* clear tx_ready to prevent any false hits for reset */
	hw->mac.tx_ready = false;

	/* clear the enable bit for all rings */
	for (i = 0; i < q_cnt; i++) {
		reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
		fm10k_write_reg(hw, FM10K_TXDCTL(i),
				reg & ~FM10K_TXDCTL_ENABLE);
		reg = fm10k_read_reg(hw, FM10K_RXQCTL(i));
		fm10k_write_reg(hw, FM10K_RXQCTL(i),
				reg & ~FM10K_RXQCTL_ENABLE);
	}

	/* push writes to hardware before starting the disable poll */
	fm10k_write_flush(hw);
	udelay(1);

	/* loop through all queues to verify that they are all disabled */
	for (i = 0, time = FM10K_QUEUE_DISABLE_TIMEOUT; time;) {
		/* if we are at end of rings all rings are disabled */
		if (i == q_cnt)
			return 0;

		/* if queue enables cleared, then move to next ring pair
		 * (!~reg means the register read all-ones, i.e. the device
		 * was likely surprise-removed, so treat it as disabled)
		 */
		reg = fm10k_read_reg(hw, FM10K_TXDCTL(i));
		if (!~reg || !(reg & FM10K_TXDCTL_ENABLE)) {
			reg = fm10k_read_reg(hw, FM10K_RXQCTL(i));
			if (!~reg || !(reg & FM10K_RXQCTL_ENABLE)) {
				i++;
				continue;
			}
		}

		/* decrement time and wait 1 usec */
		time--;
		if (time)
			udelay(1);
	}

	return FM10K_ERR_REQUESTS_PENDING;
}
/**
* fm10k_stop_hw_generic - Stop Tx/Rx units
* @hw: pointer to hardware structure
*
**/
s32 fm10k_stop_hw_generic(struct fm10k_hw *hw)
{
return fm10k_disable_queues_generic(hw, hw->mac.max_queues);
}
/**
 * fm10k_read_hw_stats_32b - Reads value of 32-bit registers
 * @hw: pointer to the hardware structure
 * @addr: address of register containing a 32-bit value
 * @stat: statistic structure holding the base value for @addr
 *
 * Function reads the content of the register and returns the delta
 * between the base and the current value.
 * **/
u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
			    struct fm10k_hw_stat *stat)
{
	u32 delta = fm10k_read_reg(hw, addr) - stat->base_l;

	/* NOTE(review): FM10K_REMOVED appears to flag a surprise-removed
	 * device; base_h is zeroed so the 48-bit readers don't accumulate
	 * stale upper bits — confirm against fm10k_type.h
	 */
	if (FM10K_REMOVED(hw->hw_addr))
		stat->base_h = 0;

	return delta;
}
/**
 * fm10k_read_hw_stats_48b - Reads value of 48-bit registers
 * @hw: pointer to the hardware structure
 * @addr: address of register containing the lower 32-bit value
 * @stat: statistic structure holding the base value for this counter
 *
 * Function reads the content of 2 registers, combined to represent a 48-bit
 * statistical value. Extra processing is required to handle overflowing.
 * Finally, a delta value is returned representing the difference between the
 * values stored in registers and values stored in the statistic counters.
 * **/
static u64 fm10k_read_hw_stats_48b(struct fm10k_hw *hw, u32 addr,
				   struct fm10k_hw_stat *stat)
{
	u32 count_l;
	u32 count_h;
	u32 count_tmp;
	u64 delta;

	count_h = fm10k_read_reg(hw, addr + 1);

	/* Check for overflow: re-read low then high words until the high
	 * word is stable, so we never pair a low word with the wrong epoch
	 * of the high word
	 */
	do {
		count_tmp = count_h;
		count_l = fm10k_read_reg(hw, addr);
		count_h = fm10k_read_reg(hw, addr + 1);
	} while (count_h != count_tmp);

	/* combine the two words, subtract the recorded base, and keep only
	 * the low 48 bits that the hardware counter actually implements
	 */
	delta = ((u64)(count_h - stat->base_h) << 32) + count_l;
	delta -= stat->base_l;

	return delta & FM10K_48_BIT_MASK;
}
/**
 * fm10k_update_hw_base_48b - Updates 48-bit statistic base value
 * @stat: pointer to the hardware statistic structure
 * @delta: value to be folded into the statistic base
 *
 * Adds @delta to the stored 48-bit base, carrying any overflow of the low
 * 32-bit word into the high word.  A zero delta leaves the base untouched.
 **/
static void fm10k_update_hw_base_48b(struct fm10k_hw_stat *stat, u64 delta)
{
	u64 base;

	if (!delta)
		return;

	base = stat->base_l + delta;

	/* low word is simply truncated */
	stat->base_l = (u32)base;

	/* carry overflow of the low word into the high word */
	stat->base_h += (u32)(base >> 32);
}
/**
 * fm10k_update_hw_stats_tx_q - Updates TX queue statistics counters
 * @hw: pointer to the hardware structure
 * @q: pointer to the ring of hardware statistics queue
 * @idx: index pointing to the start of the ring iteration
 *
 * Function updates the TX queue statistics counters that are related to the
 * hardware.
 **/
static void fm10k_update_hw_stats_tx_q(struct fm10k_hw *hw,
				       struct fm10k_hw_stats_q *q,
				       u32 idx)
{
	u32 id_tx, id_tx_prev, tx_packets;
	u64 tx_bytes = 0;

	/* Retrieve TX Owner Data */
	id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx));

	/* Process TX Ring; re-read the owner ID afterwards and retry the
	 * whole read if it changed, so packets/bytes come from one owner
	 */
	do {
		tx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPTC(idx),
						     &q->tx_packets);

		/* only read the byte counter if packets actually moved */
		if (tx_packets)
			tx_bytes = fm10k_read_hw_stats_48b(hw,
							   FM10K_QBTC_L(idx),
							   &q->tx_bytes);

		/* Re-Check Owner Data */
		id_tx_prev = id_tx;
		id_tx = fm10k_read_reg(hw, FM10K_TXQCTL(idx));
	} while ((id_tx ^ id_tx_prev) & FM10K_TXQCTL_ID_MASK);

	/* drop non-ID bits and set VALID ID bit */
	id_tx &= FM10K_TXQCTL_ID_MASK;
	id_tx |= FM10K_STAT_VALID;

	/* update packet counts only if the queue owner is unchanged since
	 * the last update; otherwise the deltas belong to a previous owner
	 */
	if (q->tx_stats_idx == id_tx) {
		q->tx_packets.count += tx_packets;
		q->tx_bytes.count += tx_bytes;
	}

	/* update bases and record ID */
	fm10k_update_hw_base_32b(&q->tx_packets, tx_packets);
	fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes);

	q->tx_stats_idx = id_tx;
}
/**
 * fm10k_update_hw_stats_rx_q - Updates RX queue statistics counters
 * @hw: pointer to the hardware structure
 * @q: pointer to the ring of hardware statistics queue
 * @idx: index pointing to the start of the ring iteration
 *
 * Function updates the RX queue statistics counters that are related to the
 * hardware.
 **/
static void fm10k_update_hw_stats_rx_q(struct fm10k_hw *hw,
				       struct fm10k_hw_stats_q *q,
				       u32 idx)
{
	u32 id_rx, id_rx_prev, rx_packets, rx_drops;
	u64 rx_bytes = 0;

	/* Retrieve RX Owner Data */
	id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));

	/* Process RX Ring; re-read the owner ID afterwards and retry the
	 * whole read if it changed, so all counters come from one owner
	 */
	do {
		rx_drops = fm10k_read_hw_stats_32b(hw, FM10K_QPRDC(idx),
						   &q->rx_drops);

		rx_packets = fm10k_read_hw_stats_32b(hw, FM10K_QPRC(idx),
						     &q->rx_packets);

		/* only read the byte counter if packets actually moved */
		if (rx_packets)
			rx_bytes = fm10k_read_hw_stats_48b(hw,
							   FM10K_QBRC_L(idx),
							   &q->rx_bytes);

		/* Re-Check Owner Data */
		id_rx_prev = id_rx;
		id_rx = fm10k_read_reg(hw, FM10K_RXQCTL(idx));
	} while ((id_rx ^ id_rx_prev) & FM10K_RXQCTL_ID_MASK);

	/* drop non-ID bits and set VALID ID bit */
	id_rx &= FM10K_RXQCTL_ID_MASK;
	id_rx |= FM10K_STAT_VALID;

	/* update packet counts only if the queue owner is unchanged since
	 * the last update; otherwise the deltas belong to a previous owner
	 */
	if (q->rx_stats_idx == id_rx) {
		q->rx_drops.count += rx_drops;
		q->rx_packets.count += rx_packets;
		q->rx_bytes.count += rx_bytes;
	}

	/* update bases and record ID */
	fm10k_update_hw_base_32b(&q->rx_drops, rx_drops);
	fm10k_update_hw_base_32b(&q->rx_packets, rx_packets);
	fm10k_update_hw_base_48b(&q->rx_bytes, rx_bytes);

	q->rx_stats_idx = id_rx;
}
/**
 * fm10k_update_hw_stats_q - Updates queue statistics counters
 * @hw: pointer to the hardware structure
 * @q: pointer to the ring of hardware statistics queue
 * @idx: index pointing to the start of the ring iteration
 * @count: number of queues to iterate over
 *
 * Walks @count consecutive queues starting at @idx and refreshes both the
 * Tx and Rx statistics counters for each one.
 **/
void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
			     u32 idx, u32 count)
{
	while (count--) {
		fm10k_update_hw_stats_tx_q(hw, q, idx);
		fm10k_update_hw_stats_rx_q(hw, q, idx);
		q++;
		idx++;
	}
}
/**
 * fm10k_unbind_hw_stats_q - Unbind the queue counters from their queues
 * @q: pointer to the ring of hardware statistics queue
 * @idx: index pointing to the start of the ring iteration
 * @count: number of queues to iterate over
 *
 * Invalidates the recorded Tx/Rx stats IDs for @count queues so that any
 * hardware updates that may have happened are ignored until the queue
 * stats bases are re-established.
 **/
void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count)
{
	while (count--) {
		q->tx_stats_idx = 0;
		q->rx_stats_idx = 0;
		q++;
		idx++;
	}
}
/**
 * fm10k_get_host_state_generic - Returns the state of the host
 * @hw: pointer to hardware structure
 * @host_ready: pointer to boolean value that will record host state
 *
 * This function will check the health of the mailbox and Tx queue 0
 * in order to determine if we should report that the link is up or not.
 * Returns 0 on success or FM10K_ERR_RESET_REQUESTED if the device needs
 * to be reset; *@host_ready is always updated on return.
 **/
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready)
{
	struct fm10k_mbx_info *mbx = &hw->mbx;
	struct fm10k_mac_info *mac = &hw->mac;
	s32 ret_val = 0;
	u32 txdctl = fm10k_read_reg(hw, FM10K_TXDCTL(0));

	/* process upstream mailbox in case interrupts were disabled */
	mbx->ops.process(hw, mbx);

	/* If Tx is no longer enabled link should come down
	 * (!(~txdctl) means the register read all-ones, i.e. the device
	 * was likely surprise-removed)
	 */
	if (!(~txdctl) || !(txdctl & FM10K_TXDCTL_ENABLE))
		mac->get_host_state = true;

	/* exit if not checking for link, or link cannot be changed */
	if (!mac->get_host_state || !(~txdctl))
		goto out;

	/* if we somehow dropped the Tx enable we should reset */
	if (hw->mac.tx_ready && !(txdctl & FM10K_TXDCTL_ENABLE)) {
		ret_val = FM10K_ERR_RESET_REQUESTED;
		goto out;
	}

	/* if Mailbox timed out we should request reset */
	if (!mbx->timeout) {
		ret_val = FM10K_ERR_RESET_REQUESTED;
		goto out;
	}

	/* verify Mailbox is still valid */
	if (!mbx->ops.tx_ready(mbx, FM10K_VFMBX_MSG_MTU))
		goto out;

	/* interface cannot receive traffic without logical ports */
	if (mac->dglort_map == FM10K_DGLORTMAP_NONE)
		goto out;

	/* if we passed all the tests above then the switch is ready and we no
	 * longer need to check for link
	 */
	mac->get_host_state = false;

out:
	*host_ready = !mac->get_host_state;
	return ret_val;
}
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_COMMON_H_
#define _FM10K_COMMON_H_

#include "fm10k_type.h"

/* a NULL mapped BAR address is treated as a surprise-removed device */
#define FM10K_REMOVED(hw_addr) unlikely(!(hw_addr))

/* PCI configuration read */
u16 fm10k_read_pci_cfg_word(struct fm10k_hw *hw, u32 reg);

/* read operations, indexed using DWORDS */
u32 fm10k_read_reg(struct fm10k_hw *hw, int reg);

/* write operations, indexed using DWORDS; hw_addr is snapshotted with
 * ACCESS_ONCE so the removal check and the writel use the same pointer
 */
#define fm10k_write_reg(hw, reg, val) \
do { \
	u32 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \
	if (!FM10K_REMOVED(hw_addr)) \
		writel((val), &hw_addr[(reg)]); \
} while (0)

/* Switch register write operations, index using DWORDS */
#define fm10k_write_sw_reg(hw, reg, val) \
do { \
	u32 __iomem *sw_addr = ACCESS_ONCE((hw)->sw_addr); \
	if (!FM10K_REMOVED(sw_addr)) \
		writel((val), &sw_addr[(reg)]); \
} while (0)

/* read ctrl register which has no clear on read fields as PCIe flush */
#define fm10k_write_flush(hw) fm10k_read_reg((hw), FM10K_CTRL)

s32 fm10k_get_bus_info_generic(struct fm10k_hw *hw);
s32 fm10k_get_invariants_generic(struct fm10k_hw *hw);
s32 fm10k_disable_queues_generic(struct fm10k_hw *hw, u16 q_cnt);
s32 fm10k_start_hw_generic(struct fm10k_hw *hw);
s32 fm10k_stop_hw_generic(struct fm10k_hw *hw);
u32 fm10k_read_hw_stats_32b(struct fm10k_hw *hw, u32 addr,
			    struct fm10k_hw_stat *stat);

/* fold a 32b delta into the low half of a statistic's base value */
#define fm10k_update_hw_base_32b(stat, delta) ((stat)->base_l += (delta))

void fm10k_update_hw_stats_q(struct fm10k_hw *hw, struct fm10k_hw_stats_q *q,
			     u32 idx, u32 count);

/* clearing base_h resets the accumulated upper half of a statistic */
#define fm10k_unbind_hw_stats_32b(s) ((s)->base_h = 0)

void fm10k_unbind_hw_stats_q(struct fm10k_hw_stats_q *q, u32 idx, u32 count);
s32 fm10k_get_host_state_generic(struct fm10k_hw *hw, bool *host_ready);
#endif /* _FM10K_COMMON_H_ */
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "fm10k.h"
#ifdef CONFIG_DCB
/**
 * fm10k_dcbnl_ieee_getets - get the ETS configuration for the device
 * @dev: netdev interface for the device
 * @ets: ETS structure to push configuration to
 **/
static int fm10k_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	int tc;

	/* all modes expose the full set of 8 traffic classes */
	ets->ets_cap = IEEE_8021QAZ_MAX_TCS;

	/* credit-based shaping is not available */
	ets->cbs = 0;

	/* strict priority only: no per-TC bandwidth split, no shaping */
	memset(ets->tc_tx_bw, 0, sizeof(ets->tc_tx_bw));
	memset(ets->tc_rx_bw, 0, sizeof(ets->tc_rx_bw));
	memset(ets->tc_tsa, IEEE_8021QAZ_TSA_STRICT, sizeof(ets->tc_tsa));

	/* mirror the netdev's priority-to-TC mapping into the reply */
	for (tc = 0; tc < IEEE_8021QAZ_MAX_TCS; tc++)
		ets->prio_tc[tc] = netdev_get_prio_tc_map(dev, tc);

	return 0;
}
/**
 * fm10k_dcbnl_ieee_setets - set the ETS configuration for the device
 * @dev: netdev interface for the device
 * @ets: ETS structure to pull configuration from
 **/
static int fm10k_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	u8 max_tc = 0;
	int tc, err;

	for (tc = 0; tc < IEEE_8021QAZ_MAX_TCS; tc++) {
		/* traffic shaping is unsupported: reject bandwidth splits */
		if (ets->tc_tx_bw[tc] || ets->tc_rx_bw[tc])
			return -EINVAL;

		/* only strict priority scheduling is available */
		if (ets->tc_tsa[tc] != IEEE_8021QAZ_TSA_STRICT)
			return -EINVAL;

		/* track the highest TC referenced by any priority */
		if (ets->prio_tc[tc] > max_tc)
			max_tc = ets->prio_tc[tc];
	}

	/* a nonzero max TC index implies max + 1 traffic classes in use */
	if (max_tc)
		max_tc++;

	if (max_tc > IEEE_8021QAZ_MAX_TCS)
		return -EINVAL;

	/* reprogram the TC hardware mapping only when the count changed */
	if (max_tc != netdev_get_num_tc(dev)) {
		err = fm10k_setup_tc(dev, max_tc);
		if (err)
			return err;
	}

	/* push the new priority mapping into the netdev */
	for (tc = 0; tc < IEEE_8021QAZ_MAX_TCS; tc++)
		netdev_set_prio_tc_map(dev, tc, ets->prio_tc[tc]);

	return 0;
}
/**
 * fm10k_dcbnl_ieee_getpfc - get the PFC configuration for the device
 * @dev: netdev interface for the device
 * @pfc: PFC structure to push configuration to
 **/
static int fm10k_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* all 8 traffic classes support flow control */
	pfc->pfc_cap = IEEE_8021QAZ_MAX_TCS;

	/* report the currently enabled per-TC PFC bitmap */
	pfc->pfc_en = interface->pfc_en;

	return 0;
}
/**
 * fm10k_dcbnl_ieee_setpfc - set the PFC configuration for the device
 * @dev: netdev interface for the device
 * @pfc: PFC structure to pull configuration from
 **/
static int fm10k_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc)
{
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* remember the requested per-TC PFC bitmap */
	interface->pfc_en = pfc->pfc_en;

	/* apply the new drop enable configuration while the device is up */
	if (netif_running(dev))
		fm10k_update_rx_drop_en(interface);

	return 0;
}
/**
 * fm10k_dcbnl_getdcbx - get the DCBX configuration for the device
 * @dev: netdev interface for the device
 *
 * Returns that we support only IEEE DCB for this interface
 **/
static u8 fm10k_dcbnl_getdcbx(struct net_device *dev)
{
	return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
}
/**
 * fm10k_dcbnl_setdcbx - set the DCBX configuration for the device
 * @dev: netdev interface for the device
 * @mode: new mode for this device
 *
 * Returns error on attempt to enable anything but IEEE DCB for this interface
 **/
static u8 fm10k_dcbnl_setdcbx(struct net_device *dev, u8 mode)
{
	/* only host-managed IEEE DCB is accepted; nonzero signals failure */
	return (mode != (DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE)) ? 1 : 0;
}
/* dcbnl callbacks exported by this driver; only the IEEE DCB (ETS/PFC)
 * variants are implemented, the CEE callbacks are intentionally absent
 */
static const struct dcbnl_rtnl_ops fm10k_dcbnl_ops = {
	.ieee_getets	= fm10k_dcbnl_ieee_getets,
	.ieee_setets	= fm10k_dcbnl_ieee_setets,
	.ieee_getpfc	= fm10k_dcbnl_ieee_getpfc,
	.ieee_setpfc	= fm10k_dcbnl_ieee_setpfc,
	.getdcbx	= fm10k_dcbnl_getdcbx,
	.setdcbx	= fm10k_dcbnl_setdcbx,
};
#endif /* CONFIG_DCB */
/**
 * fm10k_dcbnl_set_ops - Configures dcbnl ops pointer for netdev
 * @dev: netdev interface for the device
 *
 * Enables PF for DCB by assigning DCBNL ops pointer.
 **/
void fm10k_dcbnl_set_ops(struct net_device *dev)
{
#ifdef CONFIG_DCB
	struct fm10k_intfc *interface = netdev_priv(dev);

	/* only the PF exposes DCB configuration; VF netdevs are left as-is */
	if (interface->hw.mac.type == fm10k_mac_pf)
		dev->dcbnl_ops = &fm10k_dcbnl_ops;
#endif /* CONFIG_DCB */
}
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifdef CONFIG_DEBUG_FS
#include "fm10k.h"
#include <linux/debugfs.h>
#include <linux/seq_file.h>
/* root of the driver's debugfs hierarchy; NULL when debugfs is unavailable */
static struct dentry *dbg_root;
/* Descriptor Seq Functions */
static void *fm10k_dbg_desc_seq_start(struct seq_file *s, loff_t *pos)
{
struct fm10k_ring *ring = s->private;
return (*pos < ring->count) ? pos : NULL;
}
/* seq_file next callback: advance to the following descriptor index */
static void *fm10k_dbg_desc_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct fm10k_ring *ring = s->private;

	++*pos;

	if (*pos >= ring->count)
		return NULL;

	return pos;
}
/* seq_file stop callback: no per-iteration teardown is required */
static void fm10k_dbg_desc_seq_stop(struct seq_file *s, void *v)
{
	/* Do nothing. */
}
/* emit a row of i '-' characters plus a newline, used to underline the
 * column header printed by the seq_show callbacks
 */
static void fm10k_dbg_desc_break(struct seq_file *s, int i)
{
	while (i--)
		seq_puts(s, "-");

	seq_puts(s, "\n");
}
/* seq_file show callback: dump one Tx descriptor per line, preceded by a
 * column header on the first entry
 */
static int fm10k_dbg_tx_desc_seq_show(struct seq_file *s, void *v)
{
	struct fm10k_ring *ring = s->private;
	int i = *(loff_t *)v;
	static const char tx_desc_hdr[] =
		"DES BUFFER_ADDRESS LENGTH VLAN MSS HDRLEN FLAGS\n";

	/* Generate header */
	if (!i) {
		/* use seq_puts: the header is plain text, not a format
		 * string, so it must not be passed as a printf format
		 */
		seq_puts(s, tx_desc_hdr);
		fm10k_dbg_desc_break(s, sizeof(tx_desc_hdr) - 1);
	}

	/* Validate descriptor allocation */
	if (!ring->desc) {
		seq_printf(s, "%03X Descriptor ring not allocated.\n", i);
	} else {
		struct fm10k_tx_desc *txd = FM10K_TX_DESC(ring, i);

		seq_printf(s, "%03X %#018llx %#06x %#06x %#06x %#06x %#04x\n",
			   i, txd->buffer_addr, txd->buflen, txd->vlan,
			   txd->mss, txd->hdrlen, txd->flags);
	}

	return 0;
}
/* seq_file show callback: dump one Rx descriptor per line, preceded by a
 * column header on the first entry
 */
static int fm10k_dbg_rx_desc_seq_show(struct seq_file *s, void *v)
{
	struct fm10k_ring *ring = s->private;
	int i = *(loff_t *)v;
	static const char rx_desc_hdr[] =
		"DES DATA RSS STATERR LENGTH VLAN DGLORT SGLORT TIMESTAMP\n";

	/* Generate header */
	if (!i) {
		/* use seq_puts: the header is plain text, not a format
		 * string, so it must not be passed as a printf format
		 */
		seq_puts(s, rx_desc_hdr);
		fm10k_dbg_desc_break(s, sizeof(rx_desc_hdr) - 1);
	}

	/* Validate descriptor allocation */
	if (!ring->desc) {
		seq_printf(s, "%03X Descriptor ring not allocated.\n", i);
	} else {
		union fm10k_rx_desc *rxd = FM10K_RX_DESC(ring, i);

		seq_printf(s,
			   "%03X %#010x %#010x %#010x %#06x %#06x %#06x %#06x %#018llx\n",
			   i, rxd->d.data, rxd->d.rss, rxd->d.staterr,
			   rxd->w.length, rxd->w.vlan, rxd->w.dglort,
			   rxd->w.sglort, rxd->q.timestamp);
	}

	return 0;
}
/* seq_file iterator for dumping Tx descriptor rings */
static const struct seq_operations fm10k_dbg_tx_desc_seq_ops = {
	.start = fm10k_dbg_desc_seq_start,
	.next  = fm10k_dbg_desc_seq_next,
	.stop  = fm10k_dbg_desc_seq_stop,
	.show  = fm10k_dbg_tx_desc_seq_show,
};

/* seq_file iterator for dumping Rx descriptor rings */
static const struct seq_operations fm10k_dbg_rx_desc_seq_ops = {
	.start = fm10k_dbg_desc_seq_start,
	.next  = fm10k_dbg_desc_seq_next,
	.stop  = fm10k_dbg_desc_seq_stop,
	.show  = fm10k_dbg_rx_desc_seq_show,
};
/* open callback for a per-ring debugfs file: pick the Tx or Rx seq_ops
 * based on which ring was handed to us, then attach the ring to the
 * seq_file so the iterator callbacks can reach it
 */
static int fm10k_dbg_desc_open(struct inode *inode, struct file *filep)
{
	struct fm10k_ring *ring = inode->i_private;
	struct fm10k_q_vector *q_vector = ring->q_vector;
	const struct seq_operations *desc_seq_ops;
	int err;

	/* NOTE(review): this pointer comparison assumes Tx rings are laid
	 * out in memory before rx.ring within the q_vector allocation —
	 * confirm against the q_vector allocation code
	 */
	if (ring < q_vector->rx.ring)
		desc_seq_ops = &fm10k_dbg_tx_desc_seq_ops;
	else
		desc_seq_ops = &fm10k_dbg_rx_desc_seq_ops;

	err = seq_open(filep, desc_seq_ops);
	if (err)
		return err;

	/* hand the ring to the seq_file iterator callbacks */
	((struct seq_file *)filep->private_data)->private = ring;

	return 0;
}
/* file_operations for the per-ring debugfs files; reads are serviced by
 * the seq_file iterator selected in fm10k_dbg_desc_open
 */
static const struct file_operations fm10k_dbg_desc_fops = {
	.owner   = THIS_MODULE,
	.open    = fm10k_dbg_desc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
/**
 * fm10k_dbg_q_vector_init - setup debugfs for the q_vectors
 * @q_vector: q_vector to allocate directories for
 *
 * A folder is created for each q_vector found. In each q_vector
 * folder, a debugfs file is created for each tx and rx ring
 * allocated to the q_vector.
 **/
void fm10k_dbg_q_vector_init(struct fm10k_q_vector *q_vector)
{
	struct fm10k_intfc *interface = q_vector->interface;
	char name[16];
	int i;

	if (!interface->dbg_intfc)
		return;

	/* Generate a folder for each q_vector; snprintf bounds the write
	 * so an oversized index cannot overflow name[]
	 */
	snprintf(name, sizeof(name), "q_vector.%03d", q_vector->v_idx);
	q_vector->dbg_q_vector = debugfs_create_dir(name, interface->dbg_intfc);
	if (!q_vector->dbg_q_vector)
		return;

	/* Generate a file for each tx ring in the q_vector */
	for (i = 0; i < q_vector->tx.count; i++) {
		struct fm10k_ring *ring = &q_vector->tx.ring[i];

		snprintf(name, sizeof(name), "tx_ring.%03d", ring->queue_index);

		debugfs_create_file(name, 0600,
				    q_vector->dbg_q_vector, ring,
				    &fm10k_dbg_desc_fops);
	}

	/* Generate a file for each rx ring in the q_vector */
	for (i = 0; i < q_vector->rx.count; i++) {
		struct fm10k_ring *ring = &q_vector->rx.ring[i];

		snprintf(name, sizeof(name), "rx_ring.%03d", ring->queue_index);

		debugfs_create_file(name, 0600,
				    q_vector->dbg_q_vector, ring,
				    &fm10k_dbg_desc_fops);
	}
}
/**
 * fm10k_dbg_q_vector_exit - remove debugfs entries for the q_vector
 * @q_vector: q_vector whose debugfs directory should be freed
 *
 * Removes the per-q_vector directory, and the ring files within it,
 * that were created by fm10k_dbg_q_vector_init.
 **/
void fm10k_dbg_q_vector_exit(struct fm10k_q_vector *q_vector)
{
	struct fm10k_intfc *interface = q_vector->interface;

	/* entries only exist if the interface directory was created */
	if (interface->dbg_intfc)
		debugfs_remove_recursive(q_vector->dbg_q_vector);

	q_vector->dbg_q_vector = NULL;
}
/**
 * fm10k_dbg_intfc_init - setup the debugfs directory for the interface
 * @interface: the interface that is starting up
 **/
void fm10k_dbg_intfc_init(struct fm10k_intfc *interface)
{
	/* without a driver-level root there is nowhere to hang the dir */
	if (!dbg_root)
		return;

	interface->dbg_intfc = debugfs_create_dir(pci_name(interface->pdev),
						  dbg_root);
}
/**
 * fm10k_dbg_intfc_exit - clean out the interface's debugfs entries
 * @interface: the interface that is stopping
 **/
void fm10k_dbg_intfc_exit(struct fm10k_intfc *interface)
{
	/* entries only exist if the driver-level root was created */
	if (dbg_root)
		debugfs_remove_recursive(interface->dbg_intfc);

	interface->dbg_intfc = NULL;
}
/**
 * fm10k_dbg_init - start up debugfs for the driver
 **/
void fm10k_dbg_init(void)
{
	/* create the driver-wide root; per-interface directories are hung
	 * under it by fm10k_dbg_intfc_init
	 */
	dbg_root = debugfs_create_dir(fm10k_driver_name, NULL);
}
/**
 * fm10k_dbg_exit - clean out the driver's debugfs entries
 **/
void fm10k_dbg_exit(void)
{
	/* removing the root recursively removes any remaining children */
	debugfs_remove_recursive(dbg_root);
	dbg_root = NULL;
}
#endif /* CONFIG_DEBUG_FS */
(This diff has been collapsed.)
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"
/* mailbox error handler: log the offending message ID and VF index, then
 * delegate to the generic TLV error handler
 */
static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;

	dev_err(&interface->pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}
/* handler table for messages arriving from VFs over the PF/VF mailbox;
 * unrecognized message IDs fall through to the error handler
 */
static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};
/* interrupt-context servicing of VF events: handle VF function-level
 * resets signalled via VFLRE, then service pending VF mailbox interrupts
 * recorded in MBICR
 */
s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 mbicr, vflre;
	int i;

	/* if there is no iov_data then there is no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto process_mbx;

	/* read VFLRE to determine if any VFs have been reset.
	 * NOTE(review): the double read/swap sequence below presumably
	 * guards against the two 32-bit register halves changing between
	 * reads — confirm against the datasheet.
	 */
	do {
		vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(0));
		vflre <<= 32;
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(1));
		vflre = (vflre << 32) | (vflre >> 32);
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

		i = iov_data->num_vfs;

		/* align VF num_vfs-1 with the sign bit, then walk the VFs
		 * from highest index to 0; "vflre += vflre" shifts the map
		 * left one bit per iteration, and "vflre >= 0" skips VFs
		 * whose bit (now in the sign position) is clear.
		 * NOTE(review): left-shifting a negative s64 is UB in
		 * standard C; this relies on compiler-defined behavior.
		 */
		for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
			struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

			if (vflre >= 0)
				continue;

			hw->iov.ops.reset_resources(hw, vf_info);
			vf_info->mbx.ops.connect(hw, &vf_info->mbx);
		}
	/* repeat until a fresh VFLRE read reports no pending resets, which
	 * leaves i untouched at num_vfs
	 */
	} while (i != iov_data->num_vfs);

process_mbx:
	/* read MBICR to determine which VFs require attention */
	mbicr = fm10k_read_reg(hw, FM10K_MBICR(1));
	mbicr <<= 32;
	mbicr |= fm10k_read_reg(hw, FM10K_MBICR(0));

	/* resume where the previous pass stopped, or start from the top */
	i = iov_data->next_vf_mbx ? : iov_data->num_vfs;

	/* same sign-bit walk as above, over the mailbox interrupt map */
	for (mbicr <<= 64 - i; i--; mbicr += mbicr) {
		struct fm10k_mbx_info *mbx = &iov_data->vf_info[i].mbx;

		if (mbicr >= 0)
			continue;

		/* stop early if the switch manager mailbox has no room */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
			break;

		mbx->ops.process(hw, mbx);
	}

	/* i >= 0 means we broke out early: remember where to resume.
	 * Otherwise, if this pass started mid-ring, wrap around and
	 * service the VFs that were skipped.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

read_unlock:
	rcu_read_unlock();

	return 0;
}
/* periodic servicing of all VF mailboxes: validate port mappings, recover
 * timed-out mailboxes, and process any pending traffic
 */
s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there is no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

process_mbx:
	/* walk VFs from next_vf_mbx - 1 (or num_vfs - 1 when starting a
	 * fresh pass) down to 0, resuming where a previous pass stopped
	 */
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
			hw->iov.ops.reset_lport(hw, vf_info);

		/* reset VFs that have mailbox timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* no work pending, then just continue */
		if (mbx->ops.tx_complete(mbx) && !mbx->ops.rx_ready(mbx))
			continue;

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
			break;

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* i >= 0 means the loop broke out early (SM mailbox full); record
	 * where to resume.  Otherwise, if this pass started mid-ring, wrap
	 * around and service the VFs that were skipped.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}
/* quiesce SR-IOV: tear down the VF RSS DGLORT mapping and reset the
 * hardware resources and logical port of every active VF
 */
void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int i, num_vfs;

	/* no iov_data means no VFs to walk */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
	}
}
/* re-establish hardware resources, the RSS DGLORT mapping, and the
 * mailbox connections for all configured VFs; returns -ENOMEM when the
 * SR-IOV bookkeeping has not been allocated
 */
int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS.
	 * NOTE(review): masking dglort_map with FM10K_DGLORTMAP_NONE
	 * presumably extracts the base glort — confirm the mask semantics
	 * against fm10k_type.h.
	 */
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* assign our default vid to the VF following reset */
		vf_info->sw_vid = hw->mac.default_vid;

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}
/* propagate a switch-manager default VLAN (pvid) update to the VF that
 * owns the given glort; no-op when the pvid is unchanged
 */
s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	vf_info = &iov_data->vf_info[vf_idx];

	/* notify the VF only when the pvid actually changed */
	if (vf_info->sw_vid == pvid)
		return 0;

	vf_info->sw_vid = pvid;
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	return 0;
}
/* release the SR-IOV bookkeeping: reclaim hardware resources first, then
 * drop iov_data once any RCU readers are done with it
 */
static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources before freeing the bookkeeping */
	fm10k_iov_suspend(pdev);

	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}
/* allocate and initialize the SR-IOV bookkeeping (vf_info array and
 * per-VF mailboxes) for num_vfs VFs, then bring the VFs up via
 * fm10k_iov_resume
 */
static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i, err;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage; vf_info is a trailing
	 * flexible-style array sized via offsetof
	 */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* Record VF VSI value; VF VSIs start at 1 (VSI 0 is
		 * presumably reserved for the PF — TODO confirm)
		 */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}
void fm10k_iov_disable(struct pci_dev *pdev)
{
if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
dev_err(&pdev->dev,
"Cannot disable SR-IOV while VFs are assigned\n");
else
pci_disable_sriov(pdev);
fm10k_iov_free_data(pdev);
}
/* clear the completer-abort bit in the AER uncorrectable severity mask so
 * such errors are no longer reported as fatal; no-op if the device has no
 * AER extended capability
 */
static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
{
	int pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	u32 err_sev;

	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
	err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
}
/* sriov_configure callback: resize SR-IOV to num_vfs.  VFs currently
 * assigned to a guest cannot be reconfigured, in which case the existing
 * count is kept.  Returns the resulting VF count, or a negative errno.
 */
int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		/* tear everything down so it can be rebuilt at the
		 * requested size
		 */
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && (num_vfs != current_vfs)) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	/* sriov_configure callbacks report the VF count on success */
	return num_vfs;
}
/* ndo_set_vf_mac callback: record and program a new MAC address for the
 * given VF
 */
int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* bail unless SR-IOV is active and the VF index is in range */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* an all-zero address is accepted (presumably to clear the
	 * assignment — TODO confirm); anything else must be a valid
	 * unicast address
	 */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	/* pushing the address generates a mailbox message, so serialize */
	fm10k_mbx_lock(interface);
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	fm10k_mbx_unlock(interface);

	return 0;
}
/* ndo_set_vf_vlan callback: update the default (port) VLAN for the given
 * VF; QoS values are not supported
 */
int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* bail unless SR-IOV is active and the VF index is in range */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QOS is unsupported and VLAN IDs are limited to 0-4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	/* nothing to do when the default VLAN is unchanged */
	if (vf_info->pf_vid == vid)
		return 0;

	vf_info->pf_vid = vid;

	/* the updates below generate mailbox messages, so serialize */
	fm10k_mbx_lock(interface);

	/* drop all VLAN memberships from the VF's VSI */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	/* push the new assignment and trigger a VF reset */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	fm10k_mbx_unlock(interface);

	return 0;
}
/* ndo_set_vf_rate callback: record and program a Tx rate limit for the
 * given VF; a rate of 0 removes the limit
 */
int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx, int unused,
			int rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* bail unless SR-IOV is active and the VF index is in range */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* a nonzero rate must fall within the supported TC window */
	if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* remember the requested rate, then program it into hardware */
	iov_data->vf_info[vf_idx].rate = rate;
	hw->iov.ops.configure_tc(hw, vf_idx, rate);

	return 0;
}
/* ndo_get_vf_config callback: report the current MAC, VLAN, and rate
 * assignment of the given VF
 */
int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* bail unless SR-IOV is active and the VF index is in range */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	/* fill in the reported configuration */
	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}
(This diff has been collapsed.)
(This diff has been collapsed.)
/* Intel Ethernet Switch Host Interface Driver
* Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Contact Information:
* e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*/
#ifndef _FM10K_MBX_H_
#define _FM10K_MBX_H_

/* forward declaration */
struct fm10k_mbx_info;

#include "fm10k_type.h"
#include "fm10k_tlv.h"

/* PF Mailbox Registers */
#define FM10K_MBMEM(_n)		((_n) + 0x18000)
#define FM10K_MBMEM_VF(_n, _m)	(((_n) * 0x10) + (_m) + 0x18000)
#define FM10K_MBMEM_SM(_n)	((_n) + 0x18400)
#define FM10K_MBMEM_PF(_n)	((_n) + 0x18600)
/* XOR provides means of switching from Tx to Rx FIFO */
#define FM10K_MBMEM_PF_XOR	(FM10K_MBMEM_SM(0) ^ FM10K_MBMEM_PF(0))
#define FM10K_MBX(_n)		((_n) + 0x18800)
#define FM10K_MBX_REQ				0x00000002
#define FM10K_MBX_ACK				0x00000004
#define FM10K_MBX_REQ_INTERRUPT			0x00000008
#define FM10K_MBX_ACK_INTERRUPT			0x00000010
#define FM10K_MBX_INTERRUPT_ENABLE		0x00000020
#define FM10K_MBX_INTERRUPT_DISABLE		0x00000040
#define FM10K_MBICR(_n)		((_n) + 0x18840)
#define FM10K_GMBX		0x18842

/* VF Mailbox Registers */
#define FM10K_VFMBX		0x00010
#define FM10K_VFMBMEM(_n)	((_n) + 0x00020)
#define FM10K_VFMBMEM_LEN	16
/* XOR of half the FIFO length flips between the Tx and Rx halves */
#define FM10K_VFMBMEM_VF_XOR	(FM10K_VFMBMEM_LEN / 2)

/* Delays/timeouts */
#define FM10K_MBX_DISCONNECT_TIMEOUT		500
#define FM10K_MBX_POLL_DELAY			19
#define FM10K_MBX_INT_DELAY			20
/* PF/VF Mailbox state machine
*
* +----------+ connect() +----------+
* | CLOSED | --------------> | CONNECT |
* +----------+ +----------+
* ^ ^ |
* | rcv: rcv: | | rcv:
* | Connect Disconnect | | Connect
* | Disconnect Error | | Data
* | | |
* | | V
* +----------+ disconnect() +----------+
* |DISCONNECT| <-------------- | OPEN |
* +----------+ +----------+
*
* The diagram above describes the PF/VF mailbox state machine. There
* are four main states to this machine.
* Closed: This state represents a mailbox that is in a standby state
* with interrupts disabled. In this state the mailbox should not
* read the mailbox or write any data. The only means of exiting
* this state is for the system to make the connect() call for the
* mailbox, it will then transition to the connect state.
* Connect: In this state the mailbox is seeking a connection. It will
* post a connect message with no specified destination and will
* wait for a reply from the other side of the mailbox. This state
* is exited when either a connect with the local mailbox as the
* destination is received or when a data message is received with
* a valid sequence number.
* Open: In this state the mailbox is able to transfer data between the local
* entity and the remote. It will fall back to connect in the event of
* receiving either an error message, or a disconnect message. It will
* transition to disconnect on a call to disconnect();
* Disconnect: In this state the mailbox is attempting to gracefully terminate
* the connection. It will do so at the first point where it knows
* that the remote endpoint is either done sending, or when the
* remote endpoint has fallen back into connect.
*/
/* States for the PF/VF mailbox state machine diagrammed above */
enum fm10k_mbx_state {
	FM10K_STATE_CLOSED,	/* standby: interrupts disabled, no mailbox I/O */
	FM10K_STATE_CONNECT,	/* posting connect messages, awaiting a reply */
	FM10K_STATE_OPEN,	/* connected; data transfer permitted */
	FM10K_STATE_DISCONNECT,	/* gracefully terminating the connection */
};
/* PF/VF Mailbox header format
* 3 2 1 0
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
* | Size/Err_no/CRC | Rsvd0 | Head | Tail | Type |
* +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*
* The layout above describes the format for the header used in the PF/VF
* mailbox. The header is broken out into the following fields:
* Type: There are 4 supported message types
* 0x8: Data header - used to transport message data
* 0xC: Connect header - used to establish connection
* 0xD: Disconnect header - used to tear down a connection
* 0xE: Error header - used to address message exceptions
* Tail: Tail index for local FIFO
 * Tail index actually consists of two parts. The MSB of
 * the index is a loop tracker: it is 0 on an even numbered
 * loop through the FIFO, and 1 on the odd numbered loops.
* To get the actual mailbox offset based on the tail it
* is necessary to add bit 3 to bit 0 and clear bit 3. This
* gives us a valid range of 0x1 - 0xE.
* Head: Head index for remote FIFO
* Head index follows the same format as the tail index.
* Rsvd0: Reserved 0 portion of the mailbox header
* CRC: Running CRC for all data since connect plus current message header
* Size: Maximum message size - Applies only to connect headers
* The maximum message size is provided during connect to avoid
* jamming the mailbox with messages that do not fit.
* Err_no: Error number - Applies only to error headers
 * The error number provides an indication of the type of error
 * experienced.
*/
/* macros for retrieving and setting header values */
/* mask of FM10K_MSG_<name>_SIZE low-order one bits */
#define FM10K_MSG_HDR_MASK(name) \
	((0x1u << FM10K_MSG_##name##_SIZE) - 1)
/* place value into the named field of a header DWORD */
#define FM10K_MSG_HDR_FIELD_SET(value, name) \
	(((u32)(value) & FM10K_MSG_HDR_MASK(name)) << FM10K_MSG_##name##_SHIFT)
/* extract the named field from a header DWORD (result truncated to u16) */
#define FM10K_MSG_HDR_FIELD_GET(value, name) \
	((u16)((value) >> FM10K_MSG_##name##_SHIFT) & FM10K_MSG_HDR_MASK(name))
/* offsets shared between all headers (see header format diagram above) */
#define FM10K_MSG_TYPE_SHIFT 0
#define FM10K_MSG_TYPE_SIZE 4
#define FM10K_MSG_TAIL_SHIFT 4
#define FM10K_MSG_TAIL_SIZE 4
#define FM10K_MSG_HEAD_SHIFT 8
#define FM10K_MSG_HEAD_SIZE 4
#define FM10K_MSG_RSVD0_SHIFT 12
#define FM10K_MSG_RSVD0_SIZE 4
/* offsets for data/disconnect headers */
#define FM10K_MSG_CRC_SHIFT 16
#define FM10K_MSG_CRC_SIZE 16
/* offsets for connect headers */
#define FM10K_MSG_CONNECT_SIZE_SHIFT 16
#define FM10K_MSG_CONNECT_SIZE_SIZE 16
/* offsets for error headers */
#define FM10K_MSG_ERR_NO_SHIFT 16
#define FM10K_MSG_ERR_NO_SIZE 16
/* Message types carried in the 4-bit Type field of the PF/VF mailbox
 * header; meanings are described in the header format comment above
 */
enum fm10k_msg_type {
	FM10K_MSG_DATA = 0x8,		/* transports message data */
	FM10K_MSG_CONNECT = 0xC,	/* establishes a connection */
	FM10K_MSG_DISCONNECT = 0xD,	/* tears down a connection */
	FM10K_MSG_ERROR = 0xE,		/* reports a message exception */
};
/* HNI/SM Mailbox FIFO format
* 3 2 1 0
* 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
* +-------+-----------------------+-------+-----------------------+
* | Error | Remote Head |Version| Local Tail |
* +-------+-----------------------+-------+-----------------------+
* | |
* . Local FIFO Data .
* . .
* +-------+-----------------------+-------+-----------------------+
*
* The layout above describes the format for the FIFOs used by the host
* network interface and the switch manager to communicate messages back
 * and forth. Both the HNI and the switch maintain one such FIFO. The
 * layout in memory has the switch manager FIFO followed immediately by
 * the HNI FIFO. For this reason the driver uses just the pointer to the
 * HNI FIFO in the mailbox ops, as the offset between the two is fixed.
*
* The header for the FIFO is broken out into the following fields:
* Local Tail: Offset into FIFO region for next DWORD to write.
* Version: Version info for mailbox, only values of 0/1 are supported.
* Remote Head: Offset into remote FIFO to indicate how much we have read.
* Error: Error indication, values TBD.
*/
/* version number for switch manager mailboxes */
#define FM10K_SM_MBX_VERSION 1
/* SM FIFO data length in DWORDs; presumably the "- 1" reserves one DWORD
 * for the FIFO header described above — NOTE(review): confirm against
 * fm10k_mbx.c (FM10K_MBMEM_PF_XOR is defined earlier in this header)
 */
#define FM10K_SM_MBX_FIFO_LEN (FM10K_MBMEM_PF_XOR - 1)
/* offsets shared between all SM FIFO headers (see SM FIFO diagram above) */
#define FM10K_MSG_SM_TAIL_SHIFT 0
#define FM10K_MSG_SM_TAIL_SIZE 12
#define FM10K_MSG_SM_VER_SHIFT 12
#define FM10K_MSG_SM_VER_SIZE 4
#define FM10K_MSG_SM_HEAD_SHIFT 16
#define FM10K_MSG_SM_HEAD_SIZE 12
#define FM10K_MSG_SM_ERR_SHIFT 28
#define FM10K_MSG_SM_ERR_SIZE 4
/* All error messages returned by mailbox functions
 * The value -511 is 0xFE01 in hex. The idea is to order the errors
 * from 0xFE01 - 0xFEFF so error codes are easily visible in the mailbox
 * messages. This also helps to avoid error number collisions as Linux
 * doesn't appear to use error numbers 256 - 511.
 */
#define FM10K_MBX_ERR(_n) ((_n) - 512)
/* NOTE(review): the numbering below has gaps (0x02, 0x04, 0x07, 0x0A,
 * 0x0D) — presumably codes reserved or unused by this driver
 */
#define FM10K_MBX_ERR_NO_MBX FM10K_MBX_ERR(0x01)
#define FM10K_MBX_ERR_NO_SPACE FM10K_MBX_ERR(0x03)
#define FM10K_MBX_ERR_TAIL FM10K_MBX_ERR(0x05)
#define FM10K_MBX_ERR_HEAD FM10K_MBX_ERR(0x06)
#define FM10K_MBX_ERR_SRC FM10K_MBX_ERR(0x08)
#define FM10K_MBX_ERR_TYPE FM10K_MBX_ERR(0x09)
#define FM10K_MBX_ERR_SIZE FM10K_MBX_ERR(0x0B)
#define FM10K_MBX_ERR_BUSY FM10K_MBX_ERR(0x0C)
#define FM10K_MBX_ERR_RSVD0 FM10K_MBX_ERR(0x0E)
#define FM10K_MBX_ERR_CRC FM10K_MBX_ERR(0x0F)
/* initial seed for the running message CRC (CRC field described above) */
#define FM10K_MBX_CRC_SEED 0xFFFF
/* Operations vector for a mailbox instance. Two implementations exist,
 * installed by fm10k_pfvf_mbx_init() and fm10k_sm_mbx_init() declared at
 * the end of this header.
 * NOTE(review): member semantics below are inferred from names — confirm
 * against the implementations in fm10k_mbx.c.
 */
struct fm10k_mbx_ops {
	/* establish a connection with the remote mailbox */
	s32 (*connect)(struct fm10k_hw *, struct fm10k_mbx_info *);
	/* gracefully tear down the connection */
	void (*disconnect)(struct fm10k_hw *, struct fm10k_mbx_info *);
	/* true if received data is available to process */
	bool (*rx_ready)(struct fm10k_mbx_info *);
	/* true if there is room to queue a message of the given length */
	bool (*tx_ready)(struct fm10k_mbx_info *, u16);
	/* true if all queued transmit data has been sent */
	bool (*tx_complete)(struct fm10k_mbx_info *);
	/* queue a message (array of DWORDs) for transmission */
	s32 (*enqueue_tx)(struct fm10k_hw *, struct fm10k_mbx_info *,
			  const u32 *);
	/* service the mailbox: move data and dispatch received messages */
	s32 (*process)(struct fm10k_hw *, struct fm10k_mbx_info *);
	/* install the table of message handlers */
	s32 (*register_handlers)(struct fm10k_mbx_info *,
				 const struct fm10k_msg_data *);
};
/* Circular FIFO of 32-bit words backing one direction of a mailbox.
 * NOTE(review): head appears to be the consumer index and tail the
 * producer index — confirm against the FIFO helpers in fm10k_mbx.c.
 */
struct fm10k_mbx_fifo {
	u32 *buffer;	/* backing storage for FIFO entries */
	u16 head;
	u16 tail;
	u16 size;	/* capacity of buffer, in DWORDs */
};
/* size of buffer to be stored in mailbox for FIFOs, in DWORDs
 * (fm10k_mbx_info.buffer below is a u32 array of FM10K_MBX_BUFFER_SIZE)
 */
#define FM10K_MBX_TX_BUFFER_SIZE 512
#define FM10K_MBX_RX_BUFFER_SIZE 128
#define FM10K_MBX_BUFFER_SIZE \
	(FM10K_MBX_TX_BUFFER_SIZE + FM10K_MBX_RX_BUFFER_SIZE)
/* minimum and maximum message size in dwords
 * Because both buffer sizes are powers of two, ANDing the (size - 1)
 * values yields min(TX, RX) - 1, i.e. 511 & 127 == 127 here.
 */
#define FM10K_MBX_MSG_MAX_SIZE \
	((FM10K_MBX_TX_BUFFER_SIZE - 1) & (FM10K_MBX_RX_BUFFER_SIZE - 1))
/* VF message MTU: half the VF mailbox memory less one DWORD of header */
#define FM10K_VFMBX_MSG_MTU ((FM10K_VFMBMEM_LEN / 2) - 1)
#define FM10K_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */
#define FM10K_MBX_INIT_DELAY 500 /* microseconds between retries */
/* Complete state for one mailbox instance (PF/VF or switch-manager),
 * initialized by fm10k_pfvf_mbx_init() / fm10k_sm_mbx_init() below.
 */
struct fm10k_mbx_info {
	/* function pointers for mailbox operations */
	struct fm10k_mbx_ops ops;
	/* handler table installed via ops.register_handlers */
	const struct fm10k_msg_data *msg_data;
	/* message FIFOs */
	struct fm10k_mbx_fifo rx;
	struct fm10k_mbx_fifo tx;
	/* delay for handling timeouts */
	u32 timeout;
	u32 udelay;
	/* mailbox state info
	 * NOTE(review): mbx_reg/mbmem_reg look like register offsets and
	 * mbx_hdr a cached header DWORD — confirm against fm10k_mbx.c
	 */
	u32 mbx_reg, mbmem_reg, mbx_lock, mbx_hdr;
	u16 max_size, mbmem_len;
	u16 tail, tail_len, pulled;
	u16 head, head_len, pushed;
	u16 local, remote;
	enum fm10k_mbx_state state;	/* current state machine state (see above) */
	/* result of last mailbox test */
	s32 test_result;
	/* statistics */
	u64 tx_busy;
	u64 tx_dropped;
	u64 tx_messages;
	u64 tx_dwords;
	u64 rx_messages;
	u64 rx_dwords;
	u64 rx_parse_err;
	/* Buffer to store messages */
	u32 buffer[FM10K_MBX_BUFFER_SIZE];
};
/* Initialize a PF/VF style mailbox.
 * NOTE(review): meaning of the trailing u8 parameter is not visible in
 * this header — confirm against the definition in fm10k_mbx.c.
 */
s32 fm10k_pfvf_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
			const struct fm10k_msg_data *, u8);
/* Initialize a switch-manager (SM) style mailbox. */
s32 fm10k_sm_mbx_init(struct fm10k_hw *, struct fm10k_mbx_info *,
		      const struct fm10k_msg_data *);
#endif /* _FM10K_MBX_H_ */
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
此差异已折叠。
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册