Commit 5db6b777 authored by Greg Kroah-Hartman

Staging: add sxg network driver

This is the first rough cut of a driver for the Alacritech SLIC
Technology Non-Accelerated 10Gbe network cards.

TODO:
  - lindent the code
  - remove typedefs
  - remove wrappers
  - checkpatch.pl cleanups
  - new functionality that the card needs

Cc: Christopher Harrer <charrer@alacritech.com>
Cc: Michael Miles <mmiles@alacritech.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Parent 4d6f6af8
@@ -27,4 +27,6 @@ source "drivers/staging/et131x/Kconfig"
source "drivers/staging/slicoss/Kconfig"
source "drivers/staging/sxg/Kconfig"
endif # STAGING
@@ -2,3 +2,4 @@
obj-$(CONFIG_ET131X) += et131x/
obj-$(CONFIG_SLICOSS) += slicoss/
obj-$(CONFIG_SXG) += sxg/
config SXG
tristate "Alacritech SLIC Technology Non-Accelerated 10Gbe support"
depends on PCI && NETDEV_10000
default n
help
This driver supports the Alacritech SLIC Technology Non-Accelerated
10Gbe network cards.
To compile this driver as a module, choose
M here: the module will be called sxg.
obj-$(CONFIG_SXG) += sxg.o
This is the rough cut of a driver for the Alacritech SLIC Technology
Non-Accelerated 10Gbe network cards.
TODO:
- lindent the code
- remove typedefs
- remove wrappers
- checkpatch.pl cleanups
- new functionality that the card needs
Please send patches to:
Greg Kroah-Hartman <gregkh@suse.de>
for any cleanups that you do to this driver.
(The source diff is too large to display; the file is shown as a blob below.)
/**************************************************************************
*
* Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of Alacritech, Inc.
*
**************************************************************************/
/*
* FILENAME: sxg.c
*
* The SXG driver for Alacritech's 10Gbe products.
*
* NOTE: This is the standard, non-accelerated version of Alacritech's
* IS-NIC driver.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mii.h>
#define SLIC_DUMP_ENABLED 0
#define SLIC_GET_STATS_ENABLED 0
#define LINUX_FREES_ADAPTER_RESOURCES 1
#define SXG_OFFLOAD_IP_CHECKSUM 0
#define SXG_POWER_MANAGEMENT_ENABLED 0
#define VPCI 0
#define DBG 1
#define ATK_DEBUG 1
#include "sxg_os.h"
#include "sxghw.h"
#include "sxghif.h"
#include "sxg.h"
#include "sxgdbg.h"
#include "sxgphycode.h"
#include "saharadbgdownload.h"
static int sxg_allocate_buffer_memory(p_adapter_t adapter, u32 Size, SXG_BUFFER_TYPE BufferType);
static void sxg_allocate_rcvblock_complete(p_adapter_t adapter, void * RcvBlock, dma_addr_t PhysicalAddress, u32 Length);
static void sxg_allocate_sgl_buffer_complete(p_adapter_t adapter, PSXG_SCATTER_GATHER SxgSgl, dma_addr_t PhysicalAddress, u32 Length);
static void sxg_mcast_init_crc32(void);
static int sxg_entry_open(p_net_device dev);
static int sxg_entry_halt(p_net_device dev);
static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd);
static int sxg_send_packets(struct sk_buff *skb, p_net_device dev);
static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb);
static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl);
static void sxg_handle_interrupt(p_adapter_t adapter);
static int sxg_process_isr(p_adapter_t adapter, u32 MessageId);
static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId);
static void sxg_complete_slow_send(p_adapter_t adapter);
static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event);
static void sxg_process_rcv_error(p_adapter_t adapter, u32 ErrorStatus);
static bool sxg_mac_filter(p_adapter_t adapter,
p_ether_header EtherHdr, ushort length);
#if SLIC_GET_STATS_ENABLED
static struct net_device_stats *sxg_get_stats(p_net_device dev);
#endif
static int sxg_mac_set_address(p_net_device dev, void * ptr);
static void sxg_adapter_set_hwaddr(p_adapter_t adapter);
static void sxg_unmap_mmio_space(p_adapter_t adapter);
static void sxg_mcast_set_mask(p_adapter_t adapter);
static int sxg_initialize_adapter(p_adapter_t adapter);
static void sxg_stock_rcv_buffers(p_adapter_t adapter);
static void sxg_complete_descriptor_blocks(p_adapter_t adapter, unsigned char Index);
static int sxg_initialize_link(p_adapter_t adapter);
static int sxg_phy_init(p_adapter_t adapter);
static void sxg_link_event(p_adapter_t adapter);
static SXG_LINK_STATE sxg_get_link_state(p_adapter_t adapter);
static void sxg_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState);
static int sxg_write_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 Value);
static int sxg_read_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 * pValue);
static void sxg_mcast_set_list(p_net_device dev);
#define XXXTODO 0
static unsigned int sxg_first_init = 1;
static char *sxg_banner =
"Alacritech SLIC Technology(tm) Server and Storage 10Gbe Accelerator (Non-Accelerated)\n";
static int sxg_debug = 1;
static int debug = -1;
static p_net_device head_netdevice = NULL;
static sxgbase_driver_t sxg_global = {
.dynamic_intagg = 1,
};
static int intagg_delay = 100;
static u32 dynamic_intagg = 0;
#define DRV_NAME "sxg"
#define DRV_VERSION "1.0.1"
#define DRV_AUTHOR "Alacritech, Inc. Engineering"
#define DRV_DESCRIPTION "Alacritech SLIC Technology(tm) Non-Accelerated 10Gbe Driver"
#define DRV_COPYRIGHT "Copyright 2000-2008 Alacritech, Inc. All rights reserved."
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_LICENSE("GPL");
module_param(dynamic_intagg, int, 0);
MODULE_PARM_DESC(dynamic_intagg, "Dynamic Interrupt Aggregation Setting");
module_param(intagg_delay, int, 0);
MODULE_PARM_DESC(intagg_delay, "uSec Interrupt Aggregation Delay");
static struct pci_device_id sxg_pci_tbl[] __devinitdata = {
{PCI_DEVICE(SXG_VENDOR_ID, SXG_DEVICE_ID)},
{0,}
};
MODULE_DEVICE_TABLE(pci, sxg_pci_tbl);
/***********************************************************************
************************************************************************
************************************************************************
************************************************************************
************************************************************************/
static inline void sxg_reg32_write(void __iomem *reg, u32 value, bool flush)
{
writel(value, reg);
if (flush)
mb();
}
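/*
 * sxg_reg64_write (below) writes a 64-bit value to hardware that only takes
 * 32-bit MMIO writes: the upper 32 bits are staged in the per-CPU
 * UcodeRegs[cpu].Upper register and the lower 32 bits are then written to the
 * target register (the hardware presumably latches the Upper value and
 * combines the pair). Bit64RegLock keeps another context from interleaving
 * its own Upper/lower writes between the two.
 */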
static inline void sxg_reg64_write(p_adapter_t adapter, void __iomem *reg,
u64 value, u32 cpu)
{
u32 value_high = (u32) (value >> 32);
u32 value_low = (u32) (value & 0x00000000FFFFFFFF);
unsigned long flags;
spin_lock_irqsave(&adapter->Bit64RegLock, flags);
writel(value_high, (void __iomem *)(&adapter->UcodeRegs[cpu].Upper));
writel(value_low, reg);
spin_unlock_irqrestore(&adapter->Bit64RegLock, flags);
}
static void sxg_init_driver(void)
{
if (sxg_first_init) {
DBG_ERROR("sxg: %s sxg_first_init set jiffies[%lx]\n",
__FUNCTION__, jiffies);
sxg_first_init = 0;
spin_lock_init(&sxg_global.driver_lock);
}
}
static void sxg_dbg_macaddrs(p_adapter_t adapter)
{
DBG_ERROR(" (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
adapter->netdev->name, adapter->currmacaddr[0],
adapter->currmacaddr[1], adapter->currmacaddr[2],
adapter->currmacaddr[3], adapter->currmacaddr[4],
adapter->currmacaddr[5]);
DBG_ERROR(" (%s) mac %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
adapter->netdev->name, adapter->macaddr[0],
adapter->macaddr[1], adapter->macaddr[2],
adapter->macaddr[3], adapter->macaddr[4],
adapter->macaddr[5]);
return;
}
// SXG Globals
static SXG_DRIVER SxgDriver;
#ifdef ATKDBG
static sxg_trace_buffer_t LSxgTraceBuffer;
#endif /* ATKDBG */
static sxg_trace_buffer_t *SxgTraceBuffer = NULL;
/*
* sxg_download_microcode
*
* Download Microcode to Sahara adapter
*
* Arguments -
* adapter - A pointer to our adapter structure
* UcodeSel - microcode file selection
*
* Return
* bool - TRUE on success, FALSE on failure
*/
static bool sxg_download_microcode(p_adapter_t adapter, SXG_UCODE_SEL UcodeSel)
{
PSXG_HW_REGS HwRegs = adapter->HwRegs;
u32 Section;
u32 ThisSectionSize;
u32 * Instruction = NULL;
u32 BaseAddress, AddressOffset, Address;
// u32 Failure;
u32 ValueRead;
u32 i;
u32 numSections = 0;
u32 sectionSize[16];
u32 sectionStart[16];
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DnldUcod",
adapter, 0, 0, 0);
DBG_ERROR("sxg: %s ENTER\n", __FUNCTION__);
switch (UcodeSel) {
case SXG_UCODE_SAHARA: // Sahara operational ucode
numSections = SNumSections;
for (i = 0; i < numSections; i++) {
sectionSize[i] = SSectionSize[i];
sectionStart[i] = SSectionStart[i];
}
break;
default:
printk(KERN_ERR KBUILD_MODNAME
": Woah, big error with the microcode!\n");
break;
}
DBG_ERROR("sxg: RESET THE CARD\n");
// First, reset the card
WRITE_REG(HwRegs->Reset, 0xDEAD, FLUSH);
// Download each section of the microcode as specified in
// its download file. The *download.c file is generated using
// the saharaobjtoc facility which converts the metastep .obj
// file to a .c file which contains a two dimensional array.
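// Layout assumed from the loop below: each 96-bit instruction occupies three
// consecutive u32s in SaharaUCode[Section][], i.e.
//   word 0 - instruction bits 31-0
//   word 1 - instruction bits 63-32
//   word 2 - instruction bits 95-64
// so sectionSize[] is in bytes and dividing by 12 gives the instruction count.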
for (Section = 0; Section < numSections; Section++) {
DBG_ERROR("sxg: SECTION # %d\n", Section);
switch (UcodeSel) {
case SXG_UCODE_SAHARA:
Instruction = (u32 *) & SaharaUCode[Section][0];
break;
default:
ASSERT(0);
break;
}
BaseAddress = sectionStart[Section];
ThisSectionSize = sectionSize[Section] / 12; // Size in instructions
for (AddressOffset = 0; AddressOffset < ThisSectionSize;
AddressOffset++) {
Address = BaseAddress + AddressOffset;
ASSERT((Address & ~MICROCODE_ADDRESS_MASK) == 0);
// Write instruction bits 31 - 0
WRITE_REG(HwRegs->UcodeDataLow, *Instruction, FLUSH);
// Write instruction bits 63-32
WRITE_REG(HwRegs->UcodeDataMiddle, *(Instruction + 1),
FLUSH);
// Write instruction bits 95-64
WRITE_REG(HwRegs->UcodeDataHigh, *(Instruction + 2),
FLUSH);
// Write instruction address with the WRITE bit set
WRITE_REG(HwRegs->UcodeAddr,
(Address | MICROCODE_ADDRESS_WRITE), FLUSH);
// Sahara bug in the ucode download logic - the write to DataLow
// for the next instruction could get corrupted. To avoid this,
// write to DataLow again for this instruction (which may get
// corrupted, but it doesn't matter), then increment the address
// and write the data for the next instruction to DataLow. That
// write should succeed.
WRITE_REG(HwRegs->UcodeDataLow, *Instruction, TRUE);
// Advance 3 u32s to the start of the next instruction
Instruction += 3;
}
}
// Now repeat the entire operation reading the instruction back and
// checking for parity errors
for (Section = 0; Section < numSections; Section++) {
DBG_ERROR("sxg: check SECTION # %d\n", Section);
switch (UcodeSel) {
case SXG_UCODE_SAHARA:
Instruction = (u32 *) & SaharaUCode[Section][0];
break;
default:
ASSERT(0);
break;
}
BaseAddress = sectionStart[Section];
ThisSectionSize = sectionSize[Section] / 12; // Size in instructions
for (AddressOffset = 0; AddressOffset < ThisSectionSize;
AddressOffset++) {
Address = BaseAddress + AddressOffset;
// Write the address with the READ bit set
WRITE_REG(HwRegs->UcodeAddr,
(Address | MICROCODE_ADDRESS_READ), FLUSH);
// Read it back and check parity bit.
READ_REG(HwRegs->UcodeAddr, ValueRead);
if (ValueRead & MICROCODE_ADDRESS_PARITY) {
DBG_ERROR("sxg: %s PARITY ERROR\n",
__FUNCTION__);
return (FALSE); // Parity error
}
ASSERT((ValueRead & MICROCODE_ADDRESS_MASK) == Address);
// Read the instruction back and compare
READ_REG(HwRegs->UcodeDataLow, ValueRead);
if (ValueRead != *Instruction) {
DBG_ERROR("sxg: %s MISCOMPARE LOW\n",
__FUNCTION__);
return (FALSE); // Miscompare
}
READ_REG(HwRegs->UcodeDataMiddle, ValueRead);
if (ValueRead != *(Instruction + 1)) {
DBG_ERROR("sxg: %s MISCOMPARE MIDDLE\n",
__FUNCTION__);
return (FALSE); // Miscompare
}
READ_REG(HwRegs->UcodeDataHigh, ValueRead);
if (ValueRead != *(Instruction + 2)) {
DBG_ERROR("sxg: %s MISCOMPARE HIGH\n",
__FUNCTION__);
return (FALSE); // Miscompare
}
// Advance 3 u32s to the start of the next instruction
Instruction += 3;
}
}
// Everything OK, Go.
WRITE_REG(HwRegs->UcodeAddr, MICROCODE_ADDRESS_GO, FLUSH);
// Poll the CardUp register to wait for microcode to initialize
// Give up after 10,000 attempts (500ms).
for (i = 0; i < 10000; i++) {
udelay(50);
READ_REG(adapter->UcodeRegs[0].CardUp, ValueRead);
if (ValueRead == 0xCAFE) {
DBG_ERROR("sxg: %s BOO YA 0xCAFE\n", __FUNCTION__);
break;
}
}
if (i == 10000) {
DBG_ERROR("sxg: %s TIMEOUT\n", __FUNCTION__);
return (FALSE); // Timeout
}
// Now write the LoadSync register. This is used to
// synchronize with the card so it can scribble on the memory
// that contained 0xCAFE from the "CardUp" step above
if (UcodeSel == SXG_UCODE_SAHARA) {
WRITE_REG(adapter->UcodeRegs[0].LoadSync, 0, FLUSH);
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDnldUcd",
adapter, 0, 0, 0);
DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
return (TRUE);
}
/*
* sxg_allocate_resources - Allocate memory and locks
*
* Arguments -
* adapter - A pointer to our adapter structure
*
* Return
* int
*/
static int sxg_allocate_resources(p_adapter_t adapter)
{
int status;
u32 i;
u32 RssIds, IsrCount;
// PSXG_XMT_RING XmtRing;
// PSXG_RCV_RING RcvRing;
DBG_ERROR("%s ENTER\n", __FUNCTION__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocRes",
adapter, 0, 0, 0);
// Windows tells us how many CPUs it plans to use for
// RSS
RssIds = SXG_RSS_CPU_COUNT(adapter);
IsrCount = adapter->MsiEnabled ? RssIds : 1;
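	// One ISR slot per RSS CPU when MSI is enabled; with line-based interrupts
	// (the only mode this driver currently enables - see sxg_register_interrupt)
	// a single shared slot is used.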
DBG_ERROR("%s Setup the spinlocks\n", __FUNCTION__);
// Allocate spinlocks and initialize listheads first.
spin_lock_init(&adapter->RcvQLock);
spin_lock_init(&adapter->SglQLock);
spin_lock_init(&adapter->XmtZeroLock);
spin_lock_init(&adapter->Bit64RegLock);
spin_lock_init(&adapter->AdapterLock);
DBG_ERROR("%s Setup the lists\n", __FUNCTION__);
InitializeListHead(&adapter->FreeRcvBuffers);
InitializeListHead(&adapter->FreeRcvBlocks);
InitializeListHead(&adapter->AllRcvBlocks);
InitializeListHead(&adapter->FreeSglBuffers);
InitializeListHead(&adapter->AllSglBuffers);
// Mark these basic allocations done. This flags essentially
// tells the SxgFreeResources routine that it can grab spinlocks
// and reference listheads.
adapter->BasicAllocations = TRUE;
// Main allocation loop. Start with the maximum supported by
// the microcode and back off if memory allocation
// fails. If we hit a minimum, fail.
for (;;) {
DBG_ERROR("%s Allocate XmtRings size[%x]\n", __FUNCTION__,
(sizeof(SXG_XMT_RING) * 1));
// Start with big items first - receive and transmit rings. At the moment
// I'm going to keep the ring size fixed and adjust the number of
// TCBs if we fail. Later we might consider reducing the ring size as well..
adapter->XmtRings = pci_alloc_consistent(adapter->pcidev,
sizeof(SXG_XMT_RING) *
1,
&adapter->PXmtRings);
DBG_ERROR("%s XmtRings[%p]\n", __FUNCTION__, adapter->XmtRings);
if (!adapter->XmtRings) {
goto per_tcb_allocation_failed;
}
memset(adapter->XmtRings, 0, sizeof(SXG_XMT_RING) * 1);
DBG_ERROR("%s Allocate RcvRings size[%x]\n", __FUNCTION__,
(sizeof(SXG_RCV_RING) * 1));
adapter->RcvRings =
pci_alloc_consistent(adapter->pcidev,
sizeof(SXG_RCV_RING) * 1,
&adapter->PRcvRings);
DBG_ERROR("%s RcvRings[%p]\n", __FUNCTION__, adapter->RcvRings);
if (!adapter->RcvRings) {
goto per_tcb_allocation_failed;
}
memset(adapter->RcvRings, 0, sizeof(SXG_RCV_RING) * 1);
break;
per_tcb_allocation_failed:
// an allocation failed. Free any successful allocations.
if (adapter->XmtRings) {
pci_free_consistent(adapter->pcidev,
sizeof(SXG_XMT_RING) * 1, // must match the pci_alloc_consistent size above
adapter->XmtRings,
adapter->PXmtRings);
adapter->XmtRings = NULL;
}
if (adapter->RcvRings) {
pci_free_consistent(adapter->pcidev,
sizeof(SXG_RCV_RING) * 1, // must match the pci_alloc_consistent size above
adapter->RcvRings,
adapter->PRcvRings);
adapter->RcvRings = NULL;
}
// Loop around and try again....
}
DBG_ERROR("%s Initialize RCV ZERO and XMT ZERO rings\n", __FUNCTION__);
// Initialize rcv zero and xmt zero rings
SXG_INITIALIZE_RING(adapter->RcvRingZeroInfo, SXG_RCV_RING_SIZE);
SXG_INITIALIZE_RING(adapter->XmtRingZeroInfo, SXG_XMT_RING_SIZE);
// Sanity check receive data structure format
ASSERT((adapter->ReceiveBufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
(adapter->ReceiveBufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
ASSERT(sizeof(SXG_RCV_DESCRIPTOR_BLOCK) ==
SXG_RCV_DESCRIPTOR_BLOCK_SIZE);
// Allocate receive data buffers. We allocate a block of buffers and
// a corresponding descriptor block at once. See sxghw.h:SXG_RCV_BLOCK
for (i = 0; i < SXG_INITIAL_RCV_DATA_BUFFERS;
i += SXG_RCV_DESCRIPTORS_PER_BLOCK) {
sxg_allocate_buffer_memory(adapter,
SXG_RCV_BLOCK_SIZE(adapter->
ReceiveBufferSize),
SXG_BUFFER_TYPE_RCV);
}
// NBL resource allocation can fail in the 'AllocateComplete' routine, which
// doesn't return status. Make sure we got the number of buffers we requested
if (adapter->FreeRcvBufferCount < SXG_INITIAL_RCV_DATA_BUFFERS) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF6",
adapter, adapter->FreeRcvBufferCount, SXG_MAX_ENTRIES,
0);
return (STATUS_RESOURCES);
}
DBG_ERROR("%s Allocate EventRings size[%x]\n", __FUNCTION__,
(sizeof(SXG_EVENT_RING) * RssIds));
// Allocate event queues.
adapter->EventRings = pci_alloc_consistent(adapter->pcidev,
sizeof(SXG_EVENT_RING) *
RssIds,
&adapter->PEventRings);
if (!adapter->EventRings) {
// Caller will call SxgFreeAdapter to clean up above allocations
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF8",
adapter, SXG_MAX_ENTRIES, 0, 0);
status = STATUS_RESOURCES;
goto per_tcb_allocation_failed;
}
memset(adapter->EventRings, 0, sizeof(SXG_EVENT_RING) * RssIds);
DBG_ERROR("%s Allocate ISR size[%x]\n", __FUNCTION__, IsrCount);
// Allocate ISR
adapter->Isr = pci_alloc_consistent(adapter->pcidev,
IsrCount * sizeof(u32), &adapter->PIsr);
if (!adapter->Isr) {
// Caller will call SxgFreeAdapter to clean up above allocations
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF9",
adapter, SXG_MAX_ENTRIES, 0, 0);
status = STATUS_RESOURCES;
goto per_tcb_allocation_failed;
}
memset(adapter->Isr, 0, sizeof(u32) * IsrCount);
DBG_ERROR("%s Allocate shared XMT ring zero index location size[%x]\n",
__FUNCTION__, sizeof(u32));
// Allocate shared XMT ring zero index location
adapter->XmtRingZeroIndex = pci_alloc_consistent(adapter->pcidev,
sizeof(u32),
&adapter->
PXmtRingZeroIndex);
if (!adapter->XmtRingZeroIndex) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAResF10",
adapter, SXG_MAX_ENTRIES, 0, 0);
status = STATUS_RESOURCES;
goto per_tcb_allocation_failed;
}
memset(adapter->XmtRingZeroIndex, 0, sizeof(u32));
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlcResS",
adapter, SXG_MAX_ENTRIES, 0, 0);
DBG_ERROR("%s EXIT\n", __FUNCTION__);
return (STATUS_SUCCESS);
}
/*
* sxg_config_pci -
*
* Set up PCI Configuration space
*
* Arguments -
* pcidev - A pointer to our adapter structure
*
*/
static void sxg_config_pci(struct pci_dev *pcidev)
{
u16 pci_command;
u16 new_command;
pci_read_config_word(pcidev, PCI_COMMAND, &pci_command);
DBG_ERROR("sxg: %s PCI command[%4.4x]\n", __FUNCTION__, pci_command);
// Set the command register
new_command = pci_command | (PCI_COMMAND_MEMORY | // Memory Space Enable
PCI_COMMAND_MASTER | // Bus master enable
PCI_COMMAND_INVALIDATE | // Memory write and invalidate
PCI_COMMAND_PARITY | // Parity error response
PCI_COMMAND_SERR | // System ERR
PCI_COMMAND_FAST_BACK); // Fast back-to-back
if (pci_command != new_command) {
DBG_ERROR("%s -- Updating PCI COMMAND register %4.4x->%4.4x.\n",
__FUNCTION__, pci_command, new_command);
pci_write_config_word(pcidev, PCI_COMMAND, new_command);
}
}
static int sxg_entry_probe(struct pci_dev *pcidev,
const struct pci_device_id *pci_tbl_entry)
{
static int did_version = 0;
int err;
struct net_device *netdev;
p_adapter_t adapter;
void __iomem *memmapped_ioaddr;
u32 status = 0;
ulong mmio_start = 0;
ulong mmio_len = 0;
DBG_ERROR("sxg: %s 2.6 VERSION ENTER jiffies[%lx] cpu %d\n",
__FUNCTION__, jiffies, smp_processor_id());
// Initialize trace buffer
#ifdef ATKDBG
SxgTraceBuffer = &LSxgTraceBuffer;
SXG_TRACE_INIT(SxgTraceBuffer, TRACE_NOISY);
#endif
sxg_global.dynamic_intagg = dynamic_intagg;
err = pci_enable_device(pcidev);
DBG_ERROR("Call pci_enable_device(%p) status[%x]\n", pcidev, err);
if (err) {
return err;
}
if (sxg_debug > 0 && did_version++ == 0) {
printk(KERN_INFO "%s\n", sxg_banner);
printk(KERN_INFO "%s\n", DRV_VERSION);
}
if (!(err = pci_set_dma_mask(pcidev, DMA_64BIT_MASK))) {
DBG_ERROR("pci_set_dma_mask(DMA_64BIT_MASK) successful\n");
} else {
if ((err = pci_set_dma_mask(pcidev, DMA_32BIT_MASK))) {
DBG_ERROR
("No usable DMA configuration, aborting err[%x]\n",
err);
return err;
}
DBG_ERROR("pci_set_dma_mask(DMA_32BIT_MASK) successful\n");
}
DBG_ERROR("Call pci_request_regions\n");
err = pci_request_regions(pcidev, DRV_NAME);
if (err) {
DBG_ERROR("pci_request_regions FAILED err[%x]\n", err);
return err;
}
DBG_ERROR("call pci_set_master\n");
pci_set_master(pcidev);
DBG_ERROR("call alloc_etherdev\n");
netdev = alloc_etherdev(sizeof(adapter_t));
if (!netdev) {
err = -ENOMEM;
goto err_out_exit_sxg_probe;
}
DBG_ERROR("alloc_etherdev for slic netdev[%p]\n", netdev);
SET_NETDEV_DEV(netdev, &pcidev->dev);
pci_set_drvdata(pcidev, netdev);
adapter = netdev_priv(netdev);
adapter->netdev = netdev;
adapter->pcidev = pcidev;
mmio_start = pci_resource_start(pcidev, 0);
mmio_len = pci_resource_len(pcidev, 0);
DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
mmio_start, mmio_len);
memmapped_ioaddr = ioremap(mmio_start, mmio_len);
DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __FUNCTION__, memmapped_ioaddr);
if (!memmapped_ioaddr) {
DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
__FUNCTION__, mmio_len, mmio_start);
goto err_out_free_mmio_region;
}
DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, start[%lx] len[%lx], IRQ %d.\n",
__func__, memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);
adapter->HwRegs = (void *) memmapped_ioaddr;
adapter->base_addr = memmapped_ioaddr;
mmio_start = pci_resource_start(pcidev, 2);
mmio_len = pci_resource_len(pcidev, 2);
DBG_ERROR("sxg: call ioremap(mmio_start[%lx], mmio_len[%lx])\n",
mmio_start, mmio_len);
memmapped_ioaddr = ioremap(mmio_start, mmio_len);
DBG_ERROR("sxg: %s MEMMAPPED_IOADDR [%p]\n", __func__, memmapped_ioaddr);
if (!memmapped_ioaddr) {
DBG_ERROR("%s cannot remap MMIO region %lx @ %lx\n",
__FUNCTION__, mmio_len, mmio_start);
goto err_out_free_mmio_region;
}
DBG_ERROR("sxg: %s found Alacritech SXG PCI, MMIO at %p, "
"start[%lx] len[%lx], IRQ %d.\n", __func__,
memmapped_ioaddr, mmio_start, mmio_len, pcidev->irq);
adapter->UcodeRegs = (void *)memmapped_ioaddr;
adapter->State = SXG_STATE_INITIALIZING;
// Maintain a list of all adapters anchored by
// the global SxgDriver structure.
adapter->Next = SxgDriver.Adapters;
SxgDriver.Adapters = adapter;
adapter->AdapterID = ++SxgDriver.AdapterID;
// Initialize CRC table used to determine multicast hash
sxg_mcast_init_crc32();
adapter->JumboEnabled = FALSE;
adapter->RssEnabled = FALSE;
if (adapter->JumboEnabled) {
adapter->FrameSize = JUMBOMAXFRAME;
adapter->ReceiveBufferSize = SXG_RCV_JUMBO_BUFFER_SIZE;
} else {
adapter->FrameSize = ETHERMAXFRAME;
adapter->ReceiveBufferSize = SXG_RCV_DATA_BUFFER_SIZE;
}
// status = SXG_READ_EEPROM(adapter);
// if (!status) {
// goto sxg_init_bad;
// }
DBG_ERROR("sxg: %s ENTER sxg_config_pci\n", __FUNCTION__);
sxg_config_pci(pcidev);
DBG_ERROR("sxg: %s EXIT sxg_config_pci\n", __FUNCTION__);
DBG_ERROR("sxg: %s ENTER sxg_init_driver\n", __FUNCTION__);
sxg_init_driver();
DBG_ERROR("sxg: %s EXIT sxg_init_driver\n", __FUNCTION__);
adapter->vendid = pci_tbl_entry->vendor;
adapter->devid = pci_tbl_entry->device;
adapter->subsysid = pci_tbl_entry->subdevice;
adapter->busnumber = pcidev->bus->number;
adapter->slotnumber = ((pcidev->devfn >> 3) & 0x1F);
adapter->functionnumber = (pcidev->devfn & 0x7);
adapter->memorylength = pci_resource_len(pcidev, 0);
adapter->irq = pcidev->irq;
adapter->next_netdevice = head_netdevice;
head_netdevice = netdev;
// adapter->chipid = chip_idx;
adapter->port = 0; //adapter->functionnumber;
adapter->cardindex = adapter->port;
// Allocate memory and other resources
DBG_ERROR("sxg: %s ENTER sxg_allocate_resources\n", __FUNCTION__);
status = sxg_allocate_resources(adapter);
DBG_ERROR("sxg: %s EXIT sxg_allocate_resources status %x\n",
__FUNCTION__, status);
if (status != STATUS_SUCCESS) {
goto err_out_unmap;
}
DBG_ERROR("sxg: %s ENTER sxg_download_microcode\n", __FUNCTION__);
if (sxg_download_microcode(adapter, SXG_UCODE_SAHARA)) {
DBG_ERROR("sxg: %s ENTER sxg_adapter_set_hwaddr\n",
__FUNCTION__);
sxg_adapter_set_hwaddr(adapter);
} else {
adapter->state = ADAPT_FAIL;
adapter->linkstate = LINK_DOWN;
DBG_ERROR("sxg_download_microcode FAILED status[%x]\n", status);
}
netdev->base_addr = (unsigned long)adapter->base_addr;
netdev->irq = adapter->irq;
netdev->open = sxg_entry_open;
netdev->stop = sxg_entry_halt;
netdev->hard_start_xmit = sxg_send_packets;
netdev->do_ioctl = sxg_ioctl;
#if XXXTODO
netdev->set_mac_address = sxg_mac_set_address;
#if SLIC_GET_STATS_ENABLED
netdev->get_stats = sxg_get_stats;
#endif
netdev->set_multicast_list = sxg_mcast_set_list;
#endif
strcpy(netdev->name, "eth%d");
// strcpy(netdev->name, pci_name(pcidev));
if ((err = register_netdev(netdev))) {
DBG_ERROR("Cannot register net device, aborting. %s\n",
netdev->name);
goto err_out_unmap;
}
DBG_ERROR
("sxg: %s addr 0x%lx, irq %d, MAC addr %02X:%02X:%02X:%02X:%02X:%02X\n",
netdev->name, netdev->base_addr, pcidev->irq, netdev->dev_addr[0],
netdev->dev_addr[1], netdev->dev_addr[2], netdev->dev_addr[3],
netdev->dev_addr[4], netdev->dev_addr[5]);
//sxg_init_bad:
ASSERT(status == FALSE);
// sxg_free_adapter(adapter);
DBG_ERROR("sxg: %s EXIT status[%x] jiffies[%lx] cpu %d\n", __FUNCTION__,
status, jiffies, smp_processor_id());
return status;
err_out_unmap:
iounmap((void *)memmapped_ioaddr);
err_out_free_mmio_region:
release_mem_region(mmio_start, mmio_len);
err_out_exit_sxg_probe:
DBG_ERROR("%s EXIT jiffies[%lx] cpu %d\n", __FUNCTION__, jiffies,
smp_processor_id());
return -ENODEV;
}
/***********************************************************************
* LINE BASED Interrupt routines..
***********************************************************************/
/*
*
* sxg_disable_interrupt
*
* DisableInterrupt Handler
*
* Arguments:
*
* adapter: Our adapter structure
*
* Return Value:
* None.
*/
static void sxg_disable_interrupt(p_adapter_t adapter)
{
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DisIntr",
adapter, adapter->InterruptsEnabled, 0, 0);
// For now, RSS is disabled with line based interrupts
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
//
// Turn off interrupts by writing to the icr register.
//
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_DISABLE), TRUE);
adapter->InterruptsEnabled = 0;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDisIntr",
adapter, adapter->InterruptsEnabled, 0, 0);
}
/*
*
* sxg_enable_interrupt
*
* EnableInterrupt Handler
*
* Arguments:
*
* adapter: Our adapter structure
*
* Return Value:
* None.
*/
static void sxg_enable_interrupt(p_adapter_t adapter)
{
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "EnIntr",
adapter, adapter->InterruptsEnabled, 0, 0);
// For now, RSS is disabled with line based interrupts
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
//
// Turn on interrupts by writing to the icr register.
//
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), TRUE);
adapter->InterruptsEnabled = 1;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XEnIntr",
adapter, 0, 0, 0);
}
/*
*
* sxg_isr - Process a line-based interrupt
*
* Arguments:
* irq - The interrupt vector
* dev_id - The net_device holding our adapter structure
*
* Return Value:
* IRQ_HANDLED if this was our interrupt, otherwise IRQ_NONE
*/
static irqreturn_t sxg_isr(int irq, void *dev_id)
{
p_net_device dev = (p_net_device) dev_id;
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
// u32 CpuMask = 0, i;
adapter->Stats.NumInts++;
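	// The microcode posts interrupt status into the shared-memory Isr block
	// (allocated with pci_alloc_consistent in sxg_allocate_resources) rather than
	// a device register, so a zero value here most likely means the interrupt
	// belongs to another device sharing the line.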
if (adapter->Isr[0] == 0) {
// The SLIC driver used to experience a number of spurious interrupts
// due to the delay associated with the masking of the interrupt
// (we'd bounce back in here). If we see that again with Sahara,
// add a READ_REG of the Icr register after the WRITE_REG below.
adapter->Stats.FalseInts++;
return IRQ_NONE;
}
//
// Move the Isr contents and clear the value in
// shared memory, and mask interrupts
//
adapter->IsrCopy[0] = adapter->Isr[0];
adapter->Isr[0] = 0;
WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_MASK), TRUE);
// ASSERT(adapter->IsrDpcsPending == 0);
#if XXXTODO // RSS Stuff
// If RSS is enabled and the ISR specifies
// SXG_ISR_EVENT, then schedule DPC's
// based on event queues.
if (adapter->RssEnabled && (adapter->IsrCopy[0] & SXG_ISR_EVENT)) {
for (i = 0;
i < adapter->RssSystemInfo->ProcessorInfo.RssCpuCount;
i++) {
PSXG_EVENT_RING EventRing = &adapter->EventRings[i];
PSXG_EVENT Event =
&EventRing->Ring[adapter->NextEvent[i]];
unsigned char Cpu = adapter->RssSystemInfo->RssIdToCpu[i];
if (Event->Status & EVENT_STATUS_VALID) {
adapter->IsrDpcsPending++;
CpuMask |= (1 << Cpu);
}
}
}
// Now, either schedule the CPUs specified by the CpuMask,
// or queue default
if (CpuMask) {
*QueueDefault = FALSE;
} else {
adapter->IsrDpcsPending = 1;
*QueueDefault = TRUE;
}
*TargetCpus = CpuMask;
#endif
//
// There are no DPCs in Linux, so call the handler now
//
sxg_handle_interrupt(adapter);
return IRQ_HANDLED;
}
static void sxg_handle_interrupt(p_adapter_t adapter)
{
// unsigned char RssId = 0;
u32 NewIsr;
if (adapter->Stats.RcvNoBuffer < 5) {
DBG_ERROR("Enter sxg_handle_interrupt ISR[%x]\n",
adapter->IsrCopy[0]);
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "HndlIntr",
adapter, adapter->IsrCopy[0], 0, 0);
// For now, RSS is disabled with line based interrupts
ASSERT(adapter->RssEnabled == FALSE);
ASSERT(adapter->MsiEnabled == FALSE);
ASSERT(adapter->IsrCopy[0]);
/////////////////////////////
// Always process the event queue.
sxg_process_event_queue(adapter,
(adapter->RssEnabled ? /*RssId */ 0 : 0));
#if XXXTODO // RSS stuff
if (--adapter->IsrDpcsPending) {
// We're done.
ASSERT(adapter->RssEnabled);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DPCsPend",
adapter, 0, 0, 0);
return;
}
#endif
//
// Last (or only) DPC processes the ISR and clears the interrupt.
//
NewIsr = sxg_process_isr(adapter, 0);
//
// Reenable interrupts
//
adapter->IsrCopy[0] = 0;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ClearIsr",
adapter, NewIsr, 0, 0);
if (adapter->Stats.RcvNoBuffer < 5) {
DBG_ERROR
("Exit sxg_handle_interrupt2 after enabling interrupt\n");
}
WRITE_REG(adapter->UcodeRegs[0].Isr, NewIsr, TRUE);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XHndlInt",
adapter, 0, 0, 0);
}
/*
*
* sxg_process_isr - Process an interrupt. Called from the line-based and
* message based interrupt DPC routines
*
* Arguments:
* adapter - Our adapter structure
* Queue - The ISR that needs processing
*
* Return Value:
* None
*/
static int sxg_process_isr(p_adapter_t adapter, u32 MessageId)
{
u32 Isr = adapter->IsrCopy[MessageId];
u32 NewIsr = 0;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "ProcIsr",
adapter, Isr, 0, 0);
// Error
if (Isr & SXG_ISR_ERR) {
if (Isr & SXG_ISR_PDQF) {
adapter->Stats.PdqFull++;
DBG_ERROR("%s: SXG_ISR_ERR PDQF!!\n", __FUNCTION__);
}
// No host buffer
if (Isr & SXG_ISR_RMISS) {
// There is a bunch of code in the SLIC driver which
// attempts to process more receive events per DPC
// if we start to fall behind. We'll probably
// need to do something similar here, but hold
// off for now. I don't want to make the code more
// complicated than strictly needed.
adapter->Stats.RcvNoBuffer++;
if (adapter->Stats.RcvNoBuffer < 5) {
DBG_ERROR("%s: SXG_ISR_ERR RMISS!!\n",
__FUNCTION__);
}
}
// Card crash
if (Isr & SXG_ISR_DEAD) {
// Set aside the crash info and set the adapter state to RESET
adapter->CrashCpu =
(unsigned char) ((Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT);
adapter->CrashLocation = (ushort) (Isr & SXG_ISR_CRASH);
adapter->Dead = TRUE;
DBG_ERROR("%s: ISR_DEAD %x, CPU: %d\n", __FUNCTION__,
adapter->CrashLocation, adapter->CrashCpu);
}
// Event ring full
if (Isr & SXG_ISR_ERFULL) {
// Same issue as RMISS, really. This means the
// host is falling behind the card. Need to increase
// event ring size, process more events per interrupt,
// and/or reduce/remove interrupt aggregation.
adapter->Stats.EventRingFull++;
DBG_ERROR("%s: SXG_ISR_ERR EVENT RING FULL!!\n",
__FUNCTION__);
}
// Transmit drop - no DRAM buffers or XMT error
if (Isr & SXG_ISR_XDROP) {
adapter->Stats.XmtDrops++;
adapter->Stats.XmtErrors++;
DBG_ERROR("%s: SXG_ISR_ERR XDROP!!\n", __FUNCTION__);
}
}
// Slowpath send completions
if (Isr & SXG_ISR_SPSEND) {
sxg_complete_slow_send(adapter);
}
// Dump
if (Isr & SXG_ISR_UPC) {
ASSERT(adapter->DumpCmdRunning); // Maybe change when debug is added..
adapter->DumpCmdRunning = FALSE;
}
// Link event
if (Isr & SXG_ISR_LINK) {
sxg_link_event(adapter);
}
// Debug - breakpoint hit
if (Isr & SXG_ISR_BREAK) {
// At the moment AGDB isn't written to support interactive
// debug sessions. When it is, this interrupt will be used
// to signal AGDB that it has hit a breakpoint. For now, ASSERT.
ASSERT(0);
}
// Heartbeat response
if (Isr & SXG_ISR_PING) {
adapter->PingOutstanding = FALSE;
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XProcIsr",
adapter, Isr, NewIsr, 0);
return (NewIsr);
}
/*
*
* sxg_process_event_queue - Process our event queue
*
* Arguments:
* - adapter - Adapter structure
* - RssId - The event queue requiring processing
*
* Return Value:
* None.
*/
static u32 sxg_process_event_queue(p_adapter_t adapter, u32 RssId)
{
PSXG_EVENT_RING EventRing = &adapter->EventRings[RssId];
PSXG_EVENT Event = &EventRing->Ring[adapter->NextEvent[RssId]];
u32 EventsProcessed = 0, Batches = 0;
u32 num_skbs = 0;
struct sk_buff *skb;
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
struct sk_buff *prev_skb = NULL;
struct sk_buff *IndicationList[SXG_RCV_ARRAYSIZE];
u32 Index;
PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
#endif
u32 ReturnStatus = 0;
ASSERT((adapter->State == SXG_STATE_RUNNING) ||
(adapter->State == SXG_STATE_PAUSING) ||
(adapter->State == SXG_STATE_PAUSED) ||
(adapter->State == SXG_STATE_HALTING));
// We may still have unprocessed events on the queue if
// the card crashed. Don't process them.
if (adapter->Dead) {
return (0);
}
// In theory there should only be a single processor that
// accesses this queue, and only at interrupt-DPC time. So
// we shouldn't need a lock for any of this.
while (Event->Status & EVENT_STATUS_VALID) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "Event",
Event, Event->Code, Event->Status,
adapter->NextEvent);
switch (Event->Code) {
case EVENT_CODE_BUFFERS:
ASSERT(!(Event->CommandIndex & 0xFF00)); // SXG_RING_INFO Head & Tail == unsigned char
//
sxg_complete_descriptor_blocks(adapter,
Event->CommandIndex);
//
break;
case EVENT_CODE_SLOWRCV:
--adapter->RcvBuffersOnCard;
if ((skb = sxg_slow_receive(adapter, Event))) {
u32 rx_bytes;
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
// Add it to our indication list
SXG_ADD_RCV_PACKET(adapter, skb, prev_skb,
IndicationList, num_skbs);
// In Linux, we just pass up each skb to the protocol above at this point,
// there is no capability of an indication list.
#else
// CHECK skb_pull(skb, INIC_RCVBUF_HEADSIZE);
rx_bytes = Event->Length; // (rcvbuf->length & IRHDDR_FLEN_MSK);
skb_put(skb, rx_bytes);
adapter->stats.rx_packets++;
adapter->stats.rx_bytes += rx_bytes;
#if SXG_OFFLOAD_IP_CHECKSUM
skb->ip_summed = CHECKSUM_UNNECESSARY;
#endif
skb->dev = adapter->netdev;
skb->protocol = eth_type_trans(skb, skb->dev);
netif_rx(skb);
#endif
}
break;
default:
DBG_ERROR("%s: ERROR Invalid EventCode %d\n",
__FUNCTION__, Event->Code);
// ASSERT(0);
}
// See if we need to restock card receive buffers.
// There are two things to note here:
// First - This test is not SMP safe. The
// adapter->BuffersOnCard field is protected via atomic interlocked calls, but
// we do not protect it with respect to these tests. The only way to do that
// is with a lock, and I don't want to grab a lock every time we adjust the
// BuffersOnCard count. Instead, we allow the buffer replenishment to be off
// once in a while. The worst that can happen is the card is given one
// more-or-less descriptor block than the arbitrary value we've chosen.
// No big deal
// In short DO NOT ADD A LOCK HERE, OR WHERE RcvBuffersOnCard is adjusted.
// Second - We expect this test to rarely evaluate to true. We attempt to
// refill descriptor blocks as they are returned to us
// (sxg_complete_descriptor_blocks), so The only time this should evaluate
// to true is when sxg_complete_descriptor_blocks failed to allocate
// receive buffers.
if (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
sxg_stock_rcv_buffers(adapter);
}
// It's more efficient to just set this to zero.
// But clearing the top bit saves potential debug info...
Event->Status &= ~EVENT_STATUS_VALID;
// Advance to the next event
SXG_ADVANCE_INDEX(adapter->NextEvent[RssId], EVENT_RING_SIZE);
Event = &EventRing->Ring[adapter->NextEvent[RssId]];
EventsProcessed++;
if (EventsProcessed == EVENT_RING_BATCH) {
// Release a batch of events back to the card
WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
EVENT_RING_BATCH, FALSE);
EventsProcessed = 0;
// If we've processed our batch limit, break out of the
// loop and return SXG_ISR_EVENT to arrange for us to
// be called again
if (Batches++ == EVENT_BATCH_LIMIT) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
TRACE_NOISY, "EvtLimit", Batches,
adapter->NextEvent, 0, 0);
ReturnStatus = SXG_ISR_EVENT;
break;
}
}
}
#ifdef LINUX_HANDLES_RCV_INDICATION_LISTS
//
// Indicate any received dumb-nic frames
//
SXG_INDICATE_PACKETS(adapter, IndicationList, num_skbs);
#endif
//
// Release events back to the card.
//
if (EventsProcessed) {
WRITE_REG(adapter->UcodeRegs[RssId].EventRelease,
EventsProcessed, FALSE);
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XPrcEvnt",
Batches, EventsProcessed, adapter->NextEvent, num_skbs);
return (ReturnStatus);
}
/*
* sxg_complete_slow_send - Complete slowpath or dumb-nic sends
*
* Arguments -
* adapter - A pointer to our adapter structure
* Return
* None
*/
static void sxg_complete_slow_send(p_adapter_t adapter)
{
PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
u32 * ContextType;
PSXG_CMD XmtCmd;
// NOTE - This lock is dropped and regrabbed in this loop.
// This means two different processors can both be running
// through this loop. Be *very* careful.
spin_lock(&adapter->XmtZeroLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnds",
adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
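	// *XmtRingZeroIndex is the shared completion index allocated in
	// sxg_allocate_resources, presumably advanced by the card as it finishes
	// transmit commands; walk our ring tail forward until it catches up.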
while (XmtRingInfo->Tail != *adapter->XmtRingZeroIndex) {
// Locate the current Cmd (ring descriptor entry), and
// associated SGL, and advance the tail
SXG_RETURN_CMD(XmtRing, XmtRingInfo, XmtCmd, ContextType);
ASSERT(ContextType);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
XmtRingInfo->Head, XmtRingInfo->Tail, XmtCmd, 0);
// Clear the SGL field.
XmtCmd->Sgl = 0;
switch (*ContextType) {
case SXG_SGL_DUMB:
{
struct sk_buff *skb;
// Dumb-nic send. Command context is the dumb-nic SGL
skb = (struct sk_buff *)ContextType;
// Complete the send
SXG_TRACE(TRACE_SXG, SxgTraceBuffer,
TRACE_IMPORTANT, "DmSndCmp", skb, 0,
0, 0);
ASSERT(adapter->Stats.XmtQLen);
adapter->Stats.XmtQLen--; // within XmtZeroLock
adapter->Stats.XmtOk++;
// Now drop the lock and complete the send back to
// Microsoft. We need to drop the lock because
// Microsoft can come back with a chimney send, which
// results in a double trip in SxgTcpOutput
spin_unlock(&adapter->XmtZeroLock);
SXG_COMPLETE_DUMB_SEND(adapter, skb);
// and reacquire..
spin_lock(&adapter->XmtZeroLock);
}
break;
default:
ASSERT(0);
}
}
spin_unlock(&adapter->XmtZeroLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpSnd",
adapter, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
}
/*
* sxg_slow_receive
*
* Arguments -
* adapter - A pointer to our adapter structure
* Event - Receive event
*
* Return
* skb
*/
static struct sk_buff *sxg_slow_receive(p_adapter_t adapter, PSXG_EVENT Event)
{
PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
struct sk_buff *Packet;
RcvDataBufferHdr = (PSXG_RCV_DATA_BUFFER_HDR) Event->HostHandle;
ASSERT(RcvDataBufferHdr);
ASSERT(RcvDataBufferHdr->State == SXG_BUFFER_ONCARD);
ASSERT(SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr) ==
RcvDataBufferHdr->VirtualAddress);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "SlowRcv", Event,
RcvDataBufferHdr, RcvDataBufferHdr->State,
RcvDataBufferHdr->VirtualAddress);
// Drop rcv frames in non-running state
switch (adapter->State) {
case SXG_STATE_RUNNING:
break;
case SXG_STATE_PAUSING:
case SXG_STATE_PAUSED:
case SXG_STATE_HALTING:
goto drop;
default:
ASSERT(0);
goto drop;
}
// Change buffer state to UPSTREAM
RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM;
if (Event->Status & EVENT_STATUS_RCVERR) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvError",
Event, Event->Status, Event->HostHandle, 0);
// XXXTODO - Remove this print later
DBG_ERROR("SXG: Receive error %x\n",
*(u32 *)
SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr));
sxg_process_rcv_error(adapter,
*(u32 *)
SXG_RECEIVE_DATA_LOCATION
(RcvDataBufferHdr));
goto drop;
}
#if XXXTODO // VLAN stuff
// If there's a VLAN tag, extract it and validate it
if (((p_ether_header) (SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr)))->
EtherType == ETHERTYPE_VLAN) {
if (SxgExtractVlanHeader(adapter, RcvDataBufferHdr, Event) !=
STATUS_SUCCESS) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY,
"BadVlan", Event,
SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
Event->Length, 0);
goto drop;
}
}
#endif
//
// Dumb-nic frame. See if it passes our mac filter and update stats
//
if (!sxg_mac_filter(adapter,
(p_ether_header)
SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
Event->Length)) {
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RcvFiltr",
Event, SXG_RECEIVE_DATA_LOCATION(RcvDataBufferHdr),
Event->Length, 0);
goto drop;
}
Packet = RcvDataBufferHdr->SxgDumbRcvPacket;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumbRcv",
RcvDataBufferHdr, Packet, Event->Length, 0);
//
// Lastly adjust the receive packet length.
//
SXG_ADJUST_RCV_PACKET(Packet, RcvDataBufferHdr, Event);
return (Packet);
drop:
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DropRcv",
RcvDataBufferHdr, Event->Length, 0, 0);
adapter->Stats.RcvDiscards++;
spin_lock(&adapter->RcvQLock);
SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
spin_unlock(&adapter->RcvQLock);
return (NULL);
}
/*
* sxg_process_rcv_error - process receive error and update
* stats
*
* Arguments:
* adapter - Adapter structure
* ErrorStatus - 4-byte receive error status
*
* Return Value:
* None
*/
static void sxg_process_rcv_error(p_adapter_t adapter, u32 ErrorStatus)
{
u32 Error;
adapter->Stats.RcvErrors++;
if (ErrorStatus & SXG_RCV_STATUS_TRANSPORT_ERROR) {
Error = ErrorStatus & SXG_RCV_STATUS_TRANSPORT_MASK;
switch (Error) {
case SXG_RCV_STATUS_TRANSPORT_CSUM:
adapter->Stats.TransportCsum++;
break;
case SXG_RCV_STATUS_TRANSPORT_UFLOW:
adapter->Stats.TransportUflow++;
break;
case SXG_RCV_STATUS_TRANSPORT_HDRLEN:
adapter->Stats.TransportHdrLen++;
break;
}
}
if (ErrorStatus & SXG_RCV_STATUS_NETWORK_ERROR) {
Error = ErrorStatus & SXG_RCV_STATUS_NETWORK_MASK;
switch (Error) {
case SXG_RCV_STATUS_NETWORK_CSUM:
adapter->Stats.NetworkCsum++;
break;
case SXG_RCV_STATUS_NETWORK_UFLOW:
adapter->Stats.NetworkUflow++;
break;
case SXG_RCV_STATUS_NETWORK_HDRLEN:
adapter->Stats.NetworkHdrLen++;
break;
}
}
if (ErrorStatus & SXG_RCV_STATUS_PARITY) {
adapter->Stats.Parity++;
}
if (ErrorStatus & SXG_RCV_STATUS_LINK_ERROR) {
Error = ErrorStatus & SXG_RCV_STATUS_LINK_MASK;
switch (Error) {
case SXG_RCV_STATUS_LINK_PARITY:
adapter->Stats.LinkParity++;
break;
case SXG_RCV_STATUS_LINK_EARLY:
adapter->Stats.LinkEarly++;
break;
case SXG_RCV_STATUS_LINK_BUFOFLOW:
adapter->Stats.LinkBufOflow++;
break;
case SXG_RCV_STATUS_LINK_CODE:
adapter->Stats.LinkCode++;
break;
case SXG_RCV_STATUS_LINK_DRIBBLE:
adapter->Stats.LinkDribble++;
break;
case SXG_RCV_STATUS_LINK_CRC:
adapter->Stats.LinkCrc++;
break;
case SXG_RCV_STATUS_LINK_OFLOW:
adapter->Stats.LinkOflow++;
break;
case SXG_RCV_STATUS_LINK_UFLOW:
adapter->Stats.LinkUflow++;
break;
}
}
}
/*
* sxg_mac_filter
*
* Arguments:
* adapter - Adapter structure
* pether - Ethernet header
* length - Frame length
*
* Return Value:
* TRUE if the frame is to be allowed
*/
static bool sxg_mac_filter(p_adapter_t adapter, p_ether_header EtherHdr, ushort length)
{
bool EqualAddr;
if (SXG_MULTICAST_PACKET(EtherHdr)) {
if (SXG_BROADCAST_PACKET(EtherHdr)) {
// broadcast
if (adapter->MacFilter & MAC_BCAST) {
adapter->Stats.DumbRcvBcastPkts++;
adapter->Stats.DumbRcvBcastBytes += length;
adapter->Stats.DumbRcvPkts++;
adapter->Stats.DumbRcvBytes += length;
return (TRUE);
}
} else {
// multicast
if (adapter->MacFilter & MAC_ALLMCAST) {
adapter->Stats.DumbRcvMcastPkts++;
adapter->Stats.DumbRcvMcastBytes += length;
adapter->Stats.DumbRcvPkts++;
adapter->Stats.DumbRcvBytes += length;
return (TRUE);
}
if (adapter->MacFilter & MAC_MCAST) {
PSXG_MULTICAST_ADDRESS MulticastAddrs =
adapter->MulticastAddrs;
while (MulticastAddrs) {
ETHER_EQ_ADDR(MulticastAddrs->Address,
EtherHdr->ether_dhost,
EqualAddr);
if (EqualAddr) {
adapter->Stats.
DumbRcvMcastPkts++;
adapter->Stats.
DumbRcvMcastBytes += length;
adapter->Stats.DumbRcvPkts++;
adapter->Stats.DumbRcvBytes +=
length;
return (TRUE);
}
MulticastAddrs = MulticastAddrs->Next;
}
}
}
} else if (adapter->MacFilter & MAC_DIRECTED) {
// Not broadcast or multicast. Must be directed at us or
// the card is in promiscuous mode. Either way, consider it
// ours if MAC_DIRECTED is set
adapter->Stats.DumbRcvUcastPkts++;
adapter->Stats.DumbRcvUcastBytes += length;
adapter->Stats.DumbRcvPkts++;
adapter->Stats.DumbRcvBytes += length;
return (TRUE);
}
if (adapter->MacFilter & MAC_PROMISC) {
// Whatever it is, keep it.
adapter->Stats.DumbRcvPkts++;
adapter->Stats.DumbRcvBytes += length;
return (TRUE);
}
adapter->Stats.RcvDiscards++;
return (FALSE);
}
static int sxg_register_interrupt(p_adapter_t adapter)
{
if (!adapter->intrregistered) {
int retval;
DBG_ERROR
("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x] %x\n",
__FUNCTION__, adapter, adapter->netdev->irq, NR_IRQS);
spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
retval = request_irq(adapter->netdev->irq,
&sxg_isr,
IRQF_SHARED,
adapter->netdev->name, adapter->netdev);
spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
if (retval) {
DBG_ERROR("sxg: request_irq (%s) FAILED [%x]\n",
adapter->netdev->name, retval);
return (retval);
}
adapter->intrregistered = 1;
adapter->IntRegistered = TRUE;
// Disable RSS with line-based interrupts
adapter->MsiEnabled = FALSE;
adapter->RssEnabled = FALSE;
DBG_ERROR("sxg: %s AllocAdaptRsrcs adapter[%p] dev->irq[%x]\n",
__FUNCTION__, adapter, adapter->netdev->irq);
}
return (STATUS_SUCCESS);
}
static void sxg_deregister_interrupt(p_adapter_t adapter)
{
DBG_ERROR("sxg: %s ENTER adapter[%p]\n", __FUNCTION__, adapter);
#if XXXTODO
slic_init_cleanup(adapter);
#endif
memset(&adapter->stats, 0, sizeof(struct net_device_stats));
adapter->error_interrupts = 0;
adapter->rcv_interrupts = 0;
adapter->xmit_interrupts = 0;
adapter->linkevent_interrupts = 0;
adapter->upr_interrupts = 0;
adapter->num_isrs = 0;
adapter->xmit_completes = 0;
adapter->rcv_broadcasts = 0;
adapter->rcv_multicasts = 0;
adapter->rcv_unicasts = 0;
DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
}
/*
* sxg_if_init
*
* Perform initialization of our slic interface.
*
*/
static int sxg_if_init(p_adapter_t adapter)
{
p_net_device dev = adapter->netdev;
int status = 0;
DBG_ERROR("sxg: %s (%s) ENTER states[%d:%d:%d] flags[%x]\n",
__FUNCTION__, adapter->netdev->name,
adapter->queues_initialized, adapter->state,
adapter->linkstate, dev->flags);
/* adapter should be down at this point */
if (adapter->state != ADAPT_DOWN) {
DBG_ERROR("sxg_if_init adapter->state != ADAPT_DOWN\n");
return (-EIO);
}
ASSERT(adapter->linkstate == LINK_DOWN);
adapter->devflags_prev = dev->flags;
adapter->macopts = MAC_DIRECTED;
if (dev->flags) {
DBG_ERROR("sxg: %s (%s) Set MAC options: ", __FUNCTION__,
adapter->netdev->name);
if (dev->flags & IFF_BROADCAST) {
adapter->macopts |= MAC_BCAST;
DBG_ERROR("BCAST ");
}
if (dev->flags & IFF_PROMISC) {
adapter->macopts |= MAC_PROMISC;
DBG_ERROR("PROMISC ");
}
if (dev->flags & IFF_ALLMULTI) {
adapter->macopts |= MAC_ALLMCAST;
DBG_ERROR("ALL_MCAST ");
}
if (dev->flags & IFF_MULTICAST) {
adapter->macopts |= MAC_MCAST;
DBG_ERROR("MCAST ");
}
DBG_ERROR("\n");
}
status = sxg_register_interrupt(adapter);
if (status != STATUS_SUCCESS) {
DBG_ERROR("sxg_if_init: sxg_register_interrupt FAILED %x\n",
status);
sxg_deregister_interrupt(adapter);
return (status);
}
adapter->state = ADAPT_UP;
/*
* clear any pending events, then enable interrupts
*/
DBG_ERROR("sxg: %s ENABLE interrupts(slic)\n", __FUNCTION__);
return (STATUS_SUCCESS);
}
static int sxg_entry_open(p_net_device dev)
{
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
int status;
ASSERT(adapter);
DBG_ERROR("sxg: %s adapter->activated[%d]\n", __FUNCTION__,
adapter->activated);
DBG_ERROR
("sxg: %s (%s): [jiffies[%lx] cpu %d] dev[%p] adapt[%p] port[%d]\n",
__FUNCTION__, adapter->netdev->name, jiffies, smp_processor_id(),
adapter->netdev, adapter, adapter->port);
netif_stop_queue(adapter->netdev);
spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
if (!adapter->activated) {
sxg_global.num_sxg_ports_active++;
adapter->activated = 1;
}
// Initialize the adapter
DBG_ERROR("sxg: %s ENTER sxg_initialize_adapter\n", __FUNCTION__);
status = sxg_initialize_adapter(adapter);
DBG_ERROR("sxg: %s EXIT sxg_initialize_adapter status[%x]\n",
__FUNCTION__, status);
if (status == STATUS_SUCCESS) {
DBG_ERROR("sxg: %s ENTER sxg_if_init\n", __FUNCTION__);
status = sxg_if_init(adapter);
DBG_ERROR("sxg: %s EXIT sxg_if_init status[%x]\n", __FUNCTION__,
status);
}
if (status != STATUS_SUCCESS) {
if (adapter->activated) {
sxg_global.num_sxg_ports_active--;
adapter->activated = 0;
}
spin_unlock_irqrestore(&sxg_global.driver_lock,
sxg_global.flags);
return (status);
}
DBG_ERROR("sxg: %s ENABLE ALL INTERRUPTS\n", __FUNCTION__);
// Enable interrupts
SXG_ENABLE_ALL_INTERRUPTS(adapter);
DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
return STATUS_SUCCESS;
}
static void __devexit sxg_entry_remove(struct pci_dev *pcidev)
{
p_net_device dev = pci_get_drvdata(pcidev);
u32 mmio_start = 0;
unsigned int mmio_len = 0;
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
ASSERT(adapter);
DBG_ERROR("sxg: %s ENTER dev[%p] adapter[%p]\n", __FUNCTION__, dev,
adapter);
sxg_deregister_interrupt(adapter);
sxg_unmap_mmio_space(adapter);
DBG_ERROR("sxg: %s unregister_netdev\n", __FUNCTION__);
unregister_netdev(dev);
mmio_start = pci_resource_start(pcidev, 0);
mmio_len = pci_resource_len(pcidev, 0);
DBG_ERROR("sxg: %s rel_region(0) start[%x] len[%x]\n", __FUNCTION__,
mmio_start, mmio_len);
release_mem_region(mmio_start, mmio_len);
DBG_ERROR("sxg: %s iounmap dev->base_addr[%x]\n", __FUNCTION__,
(unsigned int) dev->base_addr);
iounmap((char *)dev->base_addr);
DBG_ERROR("sxg: %s deallocate device\n", __FUNCTION__);
kfree(dev);
DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
}
static int sxg_entry_halt(p_net_device dev)
{
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
spin_lock_irqsave(&sxg_global.driver_lock, sxg_global.flags);
DBG_ERROR("sxg: %s (%s) ENTER\n", __FUNCTION__, dev->name);
netif_stop_queue(adapter->netdev);
adapter->state = ADAPT_DOWN;
adapter->linkstate = LINK_DOWN;
adapter->devflags_prev = 0;
DBG_ERROR("sxg: %s (%s) set adapter[%p] state to ADAPT_DOWN(%d)\n",
__FUNCTION__, dev->name, adapter, adapter->state);
DBG_ERROR("sxg: %s (%s) EXIT\n", __FUNCTION__, dev->name);
DBG_ERROR("sxg: %s EXIT\n", __FUNCTION__);
spin_unlock_irqrestore(&sxg_global.driver_lock, sxg_global.flags);
return (STATUS_SUCCESS);
}
static int sxg_ioctl(p_net_device dev, struct ifreq *rq, int cmd)
{
ASSERT(rq);
// DBG_ERROR("sxg: %s cmd[%x] rq[%p] dev[%p]\n", __FUNCTION__, cmd, rq, dev);
switch (cmd) {
case SIOCSLICSETINTAGG:
{
// p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
u32 data[7];
u32 intagg;
if (copy_from_user(data, rq->ifr_data, 28)) {
DBG_ERROR
("copy_from_user FAILED getting initial params\n");
return -EFAULT;
}
intagg = data[0];
printk(KERN_EMERG
"%s: set interrupt aggregation to %d\n",
__FUNCTION__, intagg);
return 0;
}
default:
// DBG_ERROR("sxg: %s UNSUPPORTED[%x]\n", __FUNCTION__, cmd);
return -EOPNOTSUPP;
}
return 0;
}
#define NORMAL_ETHFRAME 0
/*
*
* sxg_send_packets - Send a skb packet
*
* Arguments:
* skb - The packet to send
* dev - Our linux net device that refs our adapter
*
* Return:
* 0 regardless of outcome XXXTODO refer to e1000 driver
*/
static int sxg_send_packets(struct sk_buff *skb, p_net_device dev)
{
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
u32 status = STATUS_SUCCESS;
DBG_ERROR("sxg: %s ENTER sxg_send_packets skb[%p]\n", __FUNCTION__,
skb);
// Check the adapter state
switch (adapter->State) {
case SXG_STATE_INITIALIZING:
case SXG_STATE_HALTED:
case SXG_STATE_SHUTDOWN:
ASSERT(0); // unexpected
// fall through
case SXG_STATE_RESETTING:
case SXG_STATE_SLEEP:
case SXG_STATE_BOOTDIAG:
case SXG_STATE_DIAG:
case SXG_STATE_HALTING:
status = STATUS_FAILURE;
break;
case SXG_STATE_RUNNING:
if (adapter->LinkState != SXG_LINK_UP) {
status = STATUS_FAILURE;
}
break;
default:
ASSERT(0);
status = STATUS_FAILURE;
}
if (status != STATUS_SUCCESS) {
goto xmit_fail;
}
// send a packet
status = sxg_transmit_packet(adapter, skb);
if (status == STATUS_SUCCESS) {
goto xmit_done;
}
xmit_fail:
// reject & complete all the packets if they can't be sent
if (status != STATUS_SUCCESS) {
#if XXXTODO
// sxg_send_packets_fail(adapter, skb, status);
#else
SXG_DROP_DUMB_SEND(adapter, skb);
adapter->stats.tx_dropped++;
#endif
}
DBG_ERROR("sxg: %s EXIT sxg_send_packets status[%x]\n", __FUNCTION__,
status);
xmit_done:
return 0;
}
/*
* sxg_transmit_packet
*
* This function transmits a single packet.
*
* Arguments -
* adapter - Pointer to our adapter structure
* skb - The packet to be sent
*
* Return -
* STATUS of send
*/
static int sxg_transmit_packet(p_adapter_t adapter, struct sk_buff *skb)
{
PSCATTER_GATHER_LIST pSgl;
PSXG_SCATTER_GATHER SxgSgl;
void * SglBuffer;
u32 SglBufferLength;
// The vast majority of work is done in the shared
// sxg_dumb_sgl routine.
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSend",
adapter, skb, 0, 0);
// Allocate a SGL buffer
SXG_GET_SGL_BUFFER(adapter, SxgSgl);
if (!SxgSgl) {
adapter->Stats.NoSglBuf++;
adapter->Stats.XmtErrors++;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "SndPktF1",
adapter, skb, 0, 0);
return (STATUS_RESOURCES);
}
ASSERT(SxgSgl->adapter == adapter);
SglBuffer = SXG_SGL_BUFFER(SxgSgl);
SglBufferLength = SXG_SGL_BUF_SIZE;
SxgSgl->VlanTag.VlanTci = 0;
SxgSgl->VlanTag.VlanTpid = 0;
SxgSgl->Type = SXG_SGL_DUMB;
SxgSgl->DumbPacket = skb;
pSgl = NULL;
// Call the common sxg_dumb_sgl routine to complete the send.
sxg_dumb_sgl(pSgl, SxgSgl);
// Return success sxg_dumb_sgl (or something later) will complete it.
return (STATUS_SUCCESS);
}
/*
* sxg_dumb_sgl
*
* Arguments:
* pSgl -
* SxgSgl - SXG_SCATTER_GATHER
*
* Return Value:
* None.
*/
static void sxg_dumb_sgl(PSCATTER_GATHER_LIST pSgl, PSXG_SCATTER_GATHER SxgSgl)
{
p_adapter_t adapter = SxgSgl->adapter;
struct sk_buff *skb = SxgSgl->DumbPacket;
// For now, all dumb-nic sends go on RSS queue zero
PSXG_XMT_RING XmtRing = &adapter->XmtRings[0];
PSXG_RING_INFO XmtRingInfo = &adapter->XmtRingZeroInfo;
PSXG_CMD XmtCmd = NULL;
// u32 Index = 0;
u32 DataLength = skb->len;
// unsigned int BufLen;
// u32 SglOffset;
u64 phys_addr;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbSgl",
pSgl, SxgSgl, 0, 0);
// Set aside a pointer to the sgl
SxgSgl->pSgl = pSgl;
// Sanity check that our SGL format is as we expect.
ASSERT(sizeof(SXG_X64_SGE) == sizeof(SCATTER_GATHER_ELEMENT));
// Shouldn't be a vlan tag on this frame
ASSERT(SxgSgl->VlanTag.VlanTci == 0);
ASSERT(SxgSgl->VlanTag.VlanTpid == 0);
// From here below we work with the SGL placed in our
// buffer.
SxgSgl->Sgl.NumberOfElements = 1;
// Grab the spinlock and acquire a command
spin_lock(&adapter->XmtZeroLock);
SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
if (XmtCmd == NULL) {
// Call sxg_complete_slow_send to see if we can
// free up any XmtRingZero entries and then try again
spin_unlock(&adapter->XmtZeroLock);
sxg_complete_slow_send(adapter);
spin_lock(&adapter->XmtZeroLock);
SXG_GET_CMD(XmtRing, XmtRingInfo, XmtCmd, SxgSgl);
if (XmtCmd == NULL) {
adapter->Stats.XmtZeroFull++;
goto abortcmd;
}
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbCmd",
XmtCmd, XmtRingInfo->Head, XmtRingInfo->Tail, 0);
// Update stats
adapter->Stats.DumbXmtPkts++;
adapter->Stats.DumbXmtBytes += DataLength;
#if XXXTODO // Stats stuff
if (SXG_MULTICAST_PACKET(EtherHdr)) {
if (SXG_BROADCAST_PACKET(EtherHdr)) {
adapter->Stats.DumbXmtBcastPkts++;
adapter->Stats.DumbXmtBcastBytes += DataLength;
} else {
adapter->Stats.DumbXmtMcastPkts++;
adapter->Stats.DumbXmtMcastBytes += DataLength;
}
} else {
adapter->Stats.DumbXmtUcastPkts++;
adapter->Stats.DumbXmtUcastBytes += DataLength;
}
#endif
// Fill in the command
// Copy out the first SGE to the command and adjust for offset
phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len, PCI_DMA_TODEVICE);
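// Assemble the 64-bit bus address returned by pci_map_single into the
// command's first SGE: high dword shifted into bits 63:32, low dword OR'd in.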
XmtCmd->Buffer.FirstSgeAddress = SXG_GET_ADDR_HIGH(phys_addr);
XmtCmd->Buffer.FirstSgeAddress = XmtCmd->Buffer.FirstSgeAddress << 32;
XmtCmd->Buffer.FirstSgeAddress =
XmtCmd->Buffer.FirstSgeAddress | SXG_GET_ADDR_LOW(phys_addr);
// XmtCmd->Buffer.FirstSgeAddress = SxgSgl->Sgl.Elements[Index].Address;
// XmtCmd->Buffer.FirstSgeAddress.LowPart += MdlOffset;
XmtCmd->Buffer.FirstSgeLength = DataLength;
// Set a pointer to the remaining SGL entries
// XmtCmd->Sgl = SxgSgl->PhysicalAddress;
// Advance the physical address of the SxgSgl structure to
// the second SGE
// SglOffset = (u32)((u32 *)(&SxgSgl->Sgl.Elements[Index+1]) -
// (u32 *)SxgSgl);
// XmtCmd->Sgl.LowPart += SglOffset;
XmtCmd->Buffer.SgeOffset = 0;
// Note - TotalLength might be overwritten with MSS below..
XmtCmd->Buffer.TotalLength = DataLength;
XmtCmd->SgEntries = 1; //(ushort)(SxgSgl->Sgl.NumberOfElements - Index);
XmtCmd->Flags = 0;
//
// Advance transmit cmd descriptor by 1.
// NOTE - See comments in SxgTcpOutput where we write
// to the XmtCmd register regarding CPU ID values and/or
// multiple commands.
//
//
WRITE_REG(adapter->UcodeRegs[0].XmtCmd, 1, TRUE);
//
//
adapter->Stats.XmtQLen++; // Stats within lock
spin_unlock(&adapter->XmtZeroLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XDumSgl2",
XmtCmd, pSgl, SxgSgl, 0);
return;
abortcmd:
// NOTE - Only jump to this label AFTER grabbing the
// XmtZeroLock, and DO NOT DROP IT between the
// command allocation and the following abort.
if (XmtCmd) {
SXG_ABORT_CMD(XmtRingInfo);
}
spin_unlock(&adapter->XmtZeroLock);
// failsgl:
// Jump to this label if failure occurs before the
// XmtZeroLock is grabbed
adapter->Stats.XmtErrors++;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "DumSGFal",
pSgl, SxgSgl, XmtRingInfo->Head, XmtRingInfo->Tail);
SXG_COMPLETE_DUMB_SEND(adapter, SxgSgl->DumbPacket); // SxgSgl->DumbPacket is the skb
}
/***************************************************************
* Link management functions
***************************************************************/
/*
* sxg_initialize_link - Initialize the link stuff
*
* Arguments -
* adapter - A pointer to our adapter structure
*
* Return
* status
*/
static int sxg_initialize_link(p_adapter_t adapter)
{
PSXG_HW_REGS HwRegs = adapter->HwRegs;
u32 Value;
u32 ConfigData;
u32 MaxFrame;
int status;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitLink",
adapter, 0, 0, 0);
// Reset PHY and XGXS module
WRITE_REG(HwRegs->LinkStatus, LS_SERDES_POWER_DOWN, TRUE);
// Reset transmit configuration register
WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_RESET, TRUE);
// Reset receive configuration register
WRITE_REG(HwRegs->RcvConfig, RCV_CONFIG_RESET, TRUE);
// Reset all MAC modules
WRITE_REG(HwRegs->MacConfig0, AXGMAC_CFG0_SUB_RESET, TRUE);
// Link address 0
// XXXTODO - This assumes the MAC address (0a:0b:0c:0d:0e:0f)
// is stored with the first nibble (0a) in the byte 0
// of the Mac address. Possibly reverse?
Value = *(u32 *) adapter->MacAddr;
WRITE_REG(HwRegs->LinkAddress0Low, Value, TRUE);
// also write the MAC address to the MAC. Endian is reversed.
WRITE_REG(HwRegs->MacAddressLow, ntohl(Value), TRUE);
Value = (*(u16 *) & adapter->MacAddr[4] & 0x0000FFFF);
WRITE_REG(HwRegs->LinkAddress0High, Value | LINK_ADDRESS_ENABLE, TRUE);
// endian swap for the MAC (put high bytes in bits [31:16], swapped)
Value = ntohl(Value);
WRITE_REG(HwRegs->MacAddressHigh, Value, TRUE);
// Link address 1
WRITE_REG(HwRegs->LinkAddress1Low, 0, TRUE);
WRITE_REG(HwRegs->LinkAddress1High, 0, TRUE);
// Link address 2
WRITE_REG(HwRegs->LinkAddress2Low, 0, TRUE);
WRITE_REG(HwRegs->LinkAddress2High, 0, TRUE);
// Link address 3
WRITE_REG(HwRegs->LinkAddress3Low, 0, TRUE);
WRITE_REG(HwRegs->LinkAddress3High, 0, TRUE);
// Enable MAC modules
WRITE_REG(HwRegs->MacConfig0, 0, TRUE);
// Configure MAC
WRITE_REG(HwRegs->MacConfig1, (AXGMAC_CFG1_XMT_PAUSE | // Allow sending of pause
AXGMAC_CFG1_XMT_EN | // Enable XMT
AXGMAC_CFG1_RCV_PAUSE | // Enable detection of pause
AXGMAC_CFG1_RCV_EN | // Enable receive
AXGMAC_CFG1_SHORT_ASSERT | // short frame detection
AXGMAC_CFG1_CHECK_LEN | // Verify frame length
AXGMAC_CFG1_GEN_FCS | // Generate FCS
AXGMAC_CFG1_PAD_64), // Pad frames to 64 bytes
TRUE);
// Set AXGMAC max frame length if jumbo. Not needed for standard MTU
if (adapter->JumboEnabled) {
WRITE_REG(HwRegs->MacMaxFrameLen, AXGMAC_MAXFRAME_JUMBO, TRUE);
}
// AMIIM Configuration Register -
// The value placed in the AXGMAC_AMIIM_CFG_HALF_CLOCK portion
// (bottom bits) of this register is used to determine the
// MDC frequency as specified in the A-XGMAC Design Document.
// This value must not be zero. The following value (62 or 0x3E)
// is based on our MAC transmit clock frequency (MTCLK) of 312.5 MHz.
// Given a maximum MDIO clock frequency of 2.5 MHz (see the PHY spec),
// we get: 312.5/(2*(X+1)) < 2.5 ==> X = 62.
// This value happens to be the default value for this register,
// so we really don't have to do this.
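// Sanity check on the value: 312.5 MHz / (2 * (62 + 1)) is roughly 2.48 MHz,
// just under the 2.5 MHz MDIO clock limit.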
WRITE_REG(HwRegs->MacAmiimConfig, 0x0000003E, TRUE);
// Power up and enable PHY and XAUI/XGXS/Serdes logic
WRITE_REG(HwRegs->LinkStatus,
(LS_PHY_CLR_RESET |
LS_XGXS_ENABLE |
LS_XGXS_CTL | LS_PHY_CLK_EN | LS_ATTN_ALARM), TRUE);
DBG_ERROR("After Power Up and enable PHY in sxg_initialize_link\n");
// Per information given by Aeluros, wait 100 ms after removing reset.
// It's not enough to wait for the self-clearing reset bit in reg 0 to clear.
mdelay(100);
// Verify the PHY has come up by checking that the Reset bit has cleared.
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
PHY_PMA_CONTROL1, // PMA/PMD control register
&Value);
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
if (Value & PMA_CONTROL1_RESET) // reset complete if bit is 0
return (STATUS_FAILURE);
// The SERDES should be initialized by now - confirm
READ_REG(HwRegs->LinkStatus, Value);
if (Value & LS_SERDES_DOWN) // verify SERDES is initialized
return (STATUS_FAILURE);
// The XAUI link should also be up - confirm
if (!(Value & LS_XAUI_LINK_UP)) // verify XAUI link is up
return (STATUS_FAILURE);
// Initialize the PHY
status = sxg_phy_init(adapter);
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
// Enable the Link Alarm
status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
LASI_CONTROL, // LASI control register
LASI_CTL_LS_ALARM_ENABLE); // enable link alarm bit
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
// XXXTODO - temporary - verify bit is set
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
LASI_CONTROL, // LASI control register
&Value);
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
if (!(Value & LASI_CTL_LS_ALARM_ENABLE)) {
DBG_ERROR("Error! LASI Control Alarm Enable bit not set!\n");
}
// Enable receive
MaxFrame = adapter->JumboEnabled ? JUMBOMAXFRAME : ETHERMAXFRAME;
ConfigData = (RCV_CONFIG_ENABLE |
RCV_CONFIG_ENPARSE |
RCV_CONFIG_RCVBAD |
RCV_CONFIG_RCVPAUSE |
RCV_CONFIG_TZIPV6 |
RCV_CONFIG_TZIPV4 |
RCV_CONFIG_HASH_16 |
RCV_CONFIG_SOCKET | RCV_CONFIG_BUFSIZE(MaxFrame));
WRITE_REG(HwRegs->RcvConfig, ConfigData, TRUE);
WRITE_REG(HwRegs->XmtConfig, XMT_CONFIG_ENABLE, TRUE);
// Mark the link as down. We'll get a link event when it comes up.
sxg_link_state(adapter, SXG_LINK_DOWN);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInitLnk",
adapter, 0, 0, 0);
return (STATUS_SUCCESS);
}
/*
* sxg_phy_init - Initialize the PHY
*
* Arguments -
* adapter - A pointer to our adapter structure
*
* Return
* status
*/
static int sxg_phy_init(p_adapter_t adapter)
{
u32 Value;
PPHY_UCODE p;
int status;
DBG_ERROR("ENTER %s\n", __FUNCTION__);
// Read a register to identify the PHY type
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
0xC205, // PHY ID register (?)
&Value); // XXXTODO - add def
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
if (Value == 0x0012) { // 0x0012 == AEL2005C PHY(?) - XXXTODO - add def
DBG_ERROR
("AEL2005C PHY detected. Downloading PHY microcode.\n");
// Initialize AEL2005C PHY and download PHY microcode
for (p = PhyUcode; p->Addr != 0xFFFF; p++) {
if (p->Addr == 0) {
// if address == 0, data == sleep time in ms
mdelay(p->Data);
} else {
// write the given data to the specified address
status = sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
p->Addr, // PHY address
p->Data); // PHY data
if (status != STATUS_SUCCESS)
return (STATUS_FAILURE);
}
}
}
DBG_ERROR("EXIT %s\n", __FUNCTION__);
return (STATUS_SUCCESS);
}
/*
* sxg_link_event - Process a link event notification from the card
*
* Arguments -
* adapter - A pointer to our adapter structure
*
* Return
* None
*/
static void sxg_link_event(p_adapter_t adapter)
{
PSXG_HW_REGS HwRegs = adapter->HwRegs;
SXG_LINK_STATE LinkState;
int status;
u32 Value;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "LinkEvnt",
adapter, 0, 0, 0);
DBG_ERROR("ENTER %s\n", __FUNCTION__);
// Check the Link Status register. We should have a Link Alarm.
READ_REG(HwRegs->LinkStatus, Value);
if (Value & LS_LINK_ALARM) {
// We got a Link Status alarm. First, pause to let the
// link state settle (it can bounce a number of times)
mdelay(10);
// Now clear the alarm by reading the LASI status register.
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
LASI_STATUS, // LASI status register
&Value);
if (status != STATUS_SUCCESS) {
DBG_ERROR("Error reading LASI Status MDIO register!\n");
sxg_link_state(adapter, SXG_LINK_DOWN);
// ASSERT(0);
}
ASSERT(Value & LASI_STATUS_LS_ALARM);
// Now get and set the link state
LinkState = sxg_get_link_state(adapter);
sxg_link_state(adapter, LinkState);
DBG_ERROR("SXG: Link Alarm occurred. Link is %s\n",
((LinkState == SXG_LINK_UP) ? "UP" : "DOWN"));
} else {
// XXXTODO - Assuming Link Attention is only being generated for the
// Link Alarm pin (and not for a XAUI Link Status change), then it's
// impossible to get here. Yet we've gotten here twice (under extreme
// conditions - bouncing the link up and down many times a second).
// Needs further investigation.
DBG_ERROR("SXG: sxg_link_event: Can't get here!\n");
DBG_ERROR("SXG: Link Status == 0x%08X.\n", Value);
// ASSERT(0);
}
DBG_ERROR("EXIT %s\n", __FUNCTION__);
}
/*
* sxg_get_link_state - Determine if the link is up or down
*
* Arguments -
* adapter - A pointer to our adapter structure
*
* Return
* Link State
*/
static SXG_LINK_STATE sxg_get_link_state(p_adapter_t adapter)
{
int status;
u32 Value;
DBG_ERROR("ENTER %s\n", __FUNCTION__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "GetLink",
adapter, 0, 0, 0);
// Per the Xenpak spec (and the IEEE 10Gb spec?), the link is up if
// the following 3 bits (from 3 different MDIO registers) are all true.
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PMA, // PHY PMA/PMD module
PHY_PMA_RCV_DET, // PMA/PMD Receive Signal Detect register
&Value);
if (status != STATUS_SUCCESS)
goto bad;
// If PMA/PMD receive signal detect is 0, then the link is down
if (!(Value & PMA_RCV_DETECT))
return (SXG_LINK_DOWN);
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_PCS, // PHY PCS module
PHY_PCS_10G_STATUS1, // PCS 10GBASE-R Status 1 register
&Value);
if (status != STATUS_SUCCESS)
goto bad;
// If PCS is not locked to receive blocks, then the link is down
if (!(Value & PCS_10B_BLOCK_LOCK))
return (SXG_LINK_DOWN);
status = sxg_read_mdio_reg(adapter, MIIM_DEV_PHY_XS, // PHY XS module
PHY_XS_LANE_STATUS, // XS Lane Status register
&Value);
if (status != STATUS_SUCCESS)
goto bad;
// If XS transmit lanes are not aligned, then the link is down
if (!(Value & XS_LANE_ALIGN))
return (SXG_LINK_DOWN);
// All 3 bits are true, so the link is up
DBG_ERROR("EXIT %s\n", __FUNCTION__);
return (SXG_LINK_UP);
bad:
// An error occurred reading an MDIO register. This shouldn't happen.
DBG_ERROR("Error reading an MDIO register!\n");
ASSERT(0);
return (SXG_LINK_DOWN);
}
static void sxg_indicate_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState)
{
if (adapter->LinkState == SXG_LINK_UP) {
DBG_ERROR("%s: LINK now UP, call netif_start_queue\n",
__FUNCTION__);
netif_start_queue(adapter->netdev);
} else {
DBG_ERROR("%s: LINK now DOWN, call netif_stop_queue\n",
__FUNCTION__);
netif_stop_queue(adapter->netdev);
}
}
/*
* sxg_link_state - Set the link state and if necessary, indicate.
 * This routine is the central point of processing for all link state changes.
* Nothing else in the driver should alter the link state or perform
* link state indications
*
* Arguments -
* adapter - A pointer to our adapter structure
* LinkState - The link state
*
* Return
* None
*/
static void sxg_link_state(p_adapter_t adapter, SXG_LINK_STATE LinkState)
{
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "LnkINDCT",
adapter, LinkState, adapter->LinkState, adapter->State);
DBG_ERROR("ENTER %s\n", __FUNCTION__);
// Hold the adapter lock during this routine. Maybe move
// the lock to the caller.
spin_lock(&adapter->AdapterLock);
if (LinkState == adapter->LinkState) {
// Nothing changed..
spin_unlock(&adapter->AdapterLock);
DBG_ERROR("EXIT #0 %s\n", __FUNCTION__);
return;
}
// Save the adapter state
adapter->LinkState = LinkState;
// Drop the lock and indicate link state
spin_unlock(&adapter->AdapterLock);
DBG_ERROR("EXIT #1 %s\n", __FUNCTION__);
sxg_indicate_link_state(adapter, LinkState);
}
/*
* sxg_write_mdio_reg - Write to a register on the MDIO bus
*
* Arguments -
* adapter - A pointer to our adapter structure
* DevAddr - MDIO device number being addressed
* RegAddr - register address for the specified MDIO device
* Value - value to write to the MDIO register
*
* Return
* status
*/
static int sxg_write_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 Value)
{
PSXG_HW_REGS HwRegs = adapter->HwRegs;
u32 AddrOp; // Address operation (written to MIIM field reg)
u32 WriteOp; // Write operation (written to MIIM field reg)
u32 Cmd; // Command (written to MIIM command reg)
u32 ValueRead;
u32 Timeout;
// DBG_ERROR("ENTER %s\n", __FUNCTION__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
adapter, 0, 0, 0);
// Ensure values don't exceed field width
DevAddr &= 0x001F; // 5-bit field
RegAddr &= 0xFFFF; // 16-bit field
Value &= 0xFFFF; // 16-bit field
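// Per IEEE 802.3 Clause 45, 10G MDIO access is a two-step sequence: an
// address cycle selects the target register, then a separate write cycle
// supplies the data. Each step below is polled to completion.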
// Set MIIM field register bits for an MIIM address operation
AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
// Set MIIM field register bits for an MIIM write operation
WriteOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_WRITE << AXGMAC_AMIIM_FIELD_OP_SHIFT) | Value;
// Set MIIM command register bits to execute an MIIM command
Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
// Reset the command register command bit (in case it's not 0)
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
// MIIM write to set the address of the specified MDIO register
WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
// Write to MIIM Command Register to execute the address operation
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
// Poll AMIIM Indicator register to wait for completion
Timeout = SXG_LINK_TIMEOUT;
do {
udelay(100); // Timeout in 100us units
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
// Reset the command register command bit
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
// MIIM write to set up an MDIO write operation
WRITE_REG(HwRegs->MacAmiimField, WriteOp, TRUE);
// Write to MIIM Command Register to execute the write operation
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
// Poll AMIIM Indicator register to wait for completion
Timeout = SXG_LINK_TIMEOUT;
do {
udelay(100); // Timeout in 100us units
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
// DBG_ERROR("EXIT %s\n", __FUNCTION__);
return (STATUS_SUCCESS);
}
/*
* sxg_read_mdio_reg - Read a register on the MDIO bus
*
* Arguments -
* adapter - A pointer to our adapter structure
* DevAddr - MDIO device number being addressed
* RegAddr - register address for the specified MDIO device
* pValue - pointer to where to put data read from the MDIO register
*
* Return
* status
*/
static int sxg_read_mdio_reg(p_adapter_t adapter,
u32 DevAddr, u32 RegAddr, u32 * pValue)
{
PSXG_HW_REGS HwRegs = adapter->HwRegs;
u32 AddrOp; // Address operation (written to MIIM field reg)
u32 ReadOp; // Read operation (written to MIIM field reg)
u32 Cmd; // Command (written to MIIM command reg)
u32 ValueRead;
u32 Timeout;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "WrtMDIO",
adapter, 0, 0, 0);
// DBG_ERROR("ENTER %s\n", __FUNCTION__);
// Ensure values don't exceed field width
DevAddr &= 0x001F; // 5-bit field
RegAddr &= 0xFFFF; // 16-bit field
// Set MIIM field register bits for an MIIM address operation
AddrOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) | RegAddr;
// Set MIIM field register bits for an MIIM read operation
ReadOp = (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
(DevAddr << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
(MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
(MIIM_OP_READ << AXGMAC_AMIIM_FIELD_OP_SHIFT);
// Set MIIM command register bits to execute an MIIM command
Cmd = AXGMAC_AMIIM_CMD_START | AXGMAC_AMIIM_CMD_10G_OPERATION;
// Reset the command register command bit (in case it's not 0)
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
// MIIM write to set the address of the specified MDIO register
WRITE_REG(HwRegs->MacAmiimField, AddrOp, TRUE);
// Write to MIIM Command Register to execute the address operation
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
// Poll AMIIM Indicator register to wait for completion
Timeout = SXG_LINK_TIMEOUT;
do {
udelay(100); // Timeout in 100us units
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
// Reset the command register command bit
WRITE_REG(HwRegs->MacAmiimCmd, 0, TRUE);
// MIIM write to set up an MDIO register read operation
WRITE_REG(HwRegs->MacAmiimField, ReadOp, TRUE);
// Write to MIIM Command Register to execute the read operation
WRITE_REG(HwRegs->MacAmiimCmd, Cmd, TRUE);
// Poll AMIIM Indicator register to wait for completion
Timeout = SXG_LINK_TIMEOUT;
do {
udelay(100); // Timeout in 100us units
READ_REG(HwRegs->MacAmiimIndicator, ValueRead);
if (--Timeout == 0) {
return (STATUS_FAILURE);
}
} while (ValueRead & AXGMAC_AMIIM_INDC_BUSY);
// Read the MDIO register data back from the field register
READ_REG(HwRegs->MacAmiimField, *pValue);
*pValue &= 0xFFFF; // data is in the lower 16 bits
// DBG_ERROR("EXIT %s\n", __FUNCTION__);
return (STATUS_SUCCESS);
}
/*
* Allocate a mcast_address structure to hold the multicast address.
* Link it in.
*/
static int sxg_mcast_add_list(p_adapter_t adapter, char *address)
{
p_mcast_address_t mcaddr, mlist;
bool equaladdr;
/* Check to see if it already exists */
mlist = adapter->mcastaddrs;
while (mlist) {
ETHER_EQ_ADDR(mlist->address, address, equaladdr);
if (equaladdr) {
return (STATUS_SUCCESS);
}
mlist = mlist->next;
}
/* Doesn't already exist. Allocate a structure to hold it */
mcaddr = kmalloc(sizeof(mcast_address_t), GFP_ATOMIC);
if (mcaddr == NULL)
return 1;
memcpy(mcaddr->address, address, 6);
mcaddr->next = adapter->mcastaddrs;
adapter->mcastaddrs = mcaddr;
return (STATUS_SUCCESS);
}
/*
* Functions to obtain the CRC corresponding to the destination mac address.
* This is a standard ethernet CRC in that it is a 32-bit, reflected CRC using
* the polynomial:
 * x^32 + x^26 + x^23 + x^22 + x^16 + x^12 + x^11 + x^10 + x^8 + x^7 + x^5 + x^4 + x^2 + x + 1.
*
* After the CRC for the 6 bytes is generated (but before the value is complemented),
* we must then transpose the value and return bits 30-23.
*
*/
static u32 sxg_crc_table[256]; /* Table of CRC's for all possible byte values */
static u32 sxg_crc_init; /* Is table initialized */
/*
 * Construct the CRC32 table
*/
static void sxg_mcast_init_crc32(void)
{
u32 c; /* CRC shift reg */
u32 e = 0; /* Poly X-or pattern */
int i; /* counter */
int k; /* byte being shifted into crc */
static int p[] = { 0, 1, 2, 4, 5, 7, 8, 10, 11, 12, 16, 22, 23, 26 };
for (i = 0; i < sizeof(p) / sizeof(int); i++) {
e |= 1L << (31 - p[i]);
}
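// After this loop, e == 0xEDB88320: the bit-reversed (reflected) form of
// the standard CRC-32 polynomial 0x04C11DB7.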
for (i = 1; i < 256; i++) {
c = i;
for (k = 8; k; k--) {
c = c & 1 ? (c >> 1) ^ e : c >> 1;
}
sxg_crc_table[i] = c;
}
}
/*
 * Return the MAC hash as described above.
*/
static unsigned char sxg_mcast_get_mac_hash(char *macaddr)
{
u32 crc;
char *p;
int i;
unsigned char machash = 0;
if (!sxg_crc_init) {
sxg_mcast_init_crc32();
sxg_crc_init = 1;
}
crc = 0xFFFFFFFF; /* Preload shift register, per crc-32 spec */
for (i = 0, p = macaddr; i < 6; ++p, ++i) {
crc = (crc >> 8) ^ sxg_crc_table[(crc ^ *p) & 0xFF];
}
/* Return bits 1-8, transposed */
for (i = 1; i < 9; i++) {
machash |= (((crc >> i) & 1) << (8 - i));
}
return (machash);
}
static void sxg_mcast_set_bit(p_adapter_t adapter, char *address)
{
unsigned char crcpoly;
/* Get the CRC polynomial for the mac address */
crcpoly = sxg_mcast_get_mac_hash(address);
/* We only have space on the SLIC for 64 entries. Lop
* off the top two bits. (2^6 = 64)
*/
crcpoly &= 0x3F;
/* OR in the new bit into our 64 bit mask. */
adapter->MulticastMask |= (u64) 1 << crcpoly;
}
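// Multicast filtering flow: sxg_mcast_set_list() walks the device's multicast
// list, sxg_mcast_add_list() records each address, sxg_mcast_set_bit() sets the
// corresponding bit in the 64-bit MulticastMask, and sxg_mcast_set_mask()
// commits that mask to the McastLow/McastHigh registers.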
static void sxg_mcast_set_list(p_net_device dev)
{
#if XXXTODO
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
int status = STATUS_SUCCESS;
int i;
char *addresses;
struct dev_mc_list *mc_list = dev->mc_list;
int mc_count = dev->mc_count;
ASSERT(adapter);
for (i = 1; i <= mc_count; i++) {
addresses = (char *) & mc_list->dmi_addr;
if (mc_list->dmi_addrlen == 6) {
status = sxg_mcast_add_list(adapter, addresses);
if (status != STATUS_SUCCESS) {
break;
}
} else {
status = -EINVAL;
break;
}
sxg_mcast_set_bit(adapter, addresses);
mc_list = mc_list->next;
}
DBG_ERROR("%s a->devflags_prev[%x] dev->flags[%x] status[%x]\n",
__FUNCTION__, adapter->devflags_prev, dev->flags, status);
if (adapter->devflags_prev != dev->flags) {
adapter->macopts = MAC_DIRECTED;
if (dev->flags) {
if (dev->flags & IFF_BROADCAST) {
adapter->macopts |= MAC_BCAST;
}
if (dev->flags & IFF_PROMISC) {
adapter->macopts |= MAC_PROMISC;
}
if (dev->flags & IFF_ALLMULTI) {
adapter->macopts |= MAC_ALLMCAST;
}
if (dev->flags & IFF_MULTICAST) {
adapter->macopts |= MAC_MCAST;
}
}
adapter->devflags_prev = dev->flags;
DBG_ERROR("%s call sxg_config_set adapter->macopts[%x]\n",
__FUNCTION__, adapter->macopts);
sxg_config_set(adapter, TRUE);
} else {
if (status == STATUS_SUCCESS) {
sxg_mcast_set_mask(adapter);
}
}
#endif
return;
}
static void sxg_mcast_set_mask(p_adapter_t adapter)
{
PSXG_UCODE_REGS sxg_regs = adapter->UcodeRegs;
DBG_ERROR("%s ENTER (%s) macopts[%x] mask[%llx]\n", __FUNCTION__,
adapter->netdev->name, (unsigned int) adapter->MacFilter,
adapter->MulticastMask);
if (adapter->MacFilter & (MAC_ALLMCAST | MAC_PROMISC)) {
/* Turn on all multicast addresses. We have to do this for promiscuous
* mode as well as ALLMCAST mode. It saves the Microcode from having
* to keep state about the MAC configuration.
*/
// DBG_ERROR("sxg: %s macopts = MAC_ALLMCAST | MAC_PROMISC\n SLUT MODE!!!\n",__FUNCTION__);
WRITE_REG(sxg_regs->McastLow, 0xFFFFFFFF, FLUSH);
WRITE_REG(sxg_regs->McastHigh, 0xFFFFFFFF, FLUSH);
// DBG_ERROR("%s (%s) WRITE to slic_regs slic_mcastlow&high 0xFFFFFFFF\n",__FUNCTION__, adapter->netdev->name);
} else {
/* Commit our multicast mask to the SLIC by writing to the multicast
* address mask registers
*/
DBG_ERROR("%s (%s) WRITE mcastlow[%lx] mcasthigh[%lx]\n",
__FUNCTION__, adapter->netdev->name,
((ulong) (adapter->MulticastMask & 0xFFFFFFFF)),
((ulong)
((adapter->MulticastMask >> 32) & 0xFFFFFFFF)));
WRITE_REG(sxg_regs->McastLow,
(u32) (adapter->MulticastMask & 0xFFFFFFFF),
FLUSH);
WRITE_REG(sxg_regs->McastHigh,
(u32) ((adapter->
MulticastMask >> 32) & 0xFFFFFFFF),
FLUSH);
}
}
static void sxg_unmap_mmio_space(p_adapter_t adapter)
{
#if LINUX_FREES_ADAPTER_RESOURCES
// if (adapter->Regs) {
// iounmap(adapter->Regs);
// }
// adapter->slic_regs = NULL;
#endif
}
#if XXXTODO
/*
* SxgFreeResources - Free everything allocated in SxgAllocateResources
*
* Arguments -
* adapter - A pointer to our adapter structure
*
* Return
* none
*/
void SxgFreeResources(p_adapter_t adapter)
{
u32 RssIds, IsrCount;
PTCP_OBJECT TcpObject;
u32 i;
BOOLEAN TimerCancelled;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FreeRes",
adapter, adapter->MaxTcbs, 0, 0);
RssIds = SXG_RSS_CPU_COUNT(adapter);
IsrCount = adapter->MsiEnabled ? RssIds : 1;
if (adapter->BasicAllocations == FALSE) {
// No allocations have been made, including spinlocks,
// or listhead initializations. Return.
return;
}
if (!(IsListEmpty(&adapter->AllRcvBlocks))) {
SxgFreeRcvBlocks(adapter);
}
if (!(IsListEmpty(&adapter->AllSglBuffers))) {
SxgFreeSglBuffers(adapter);
}
// Free event queues.
if (adapter->EventRings) {
pci_free_consistent(adapter->pcidev,
sizeof(SXG_EVENT_RING) * RssIds,
adapter->EventRings, adapter->PEventRings);
}
if (adapter->Isr) {
pci_free_consistent(adapter->pcidev,
sizeof(u32) * IsrCount,
adapter->Isr, adapter->PIsr);
}
if (adapter->XmtRingZeroIndex) {
pci_free_consistent(adapter->pcidev,
sizeof(u32),
adapter->XmtRingZeroIndex,
adapter->PXmtRingZeroIndex);
}
if (adapter->IndirectionTable) {
pci_free_consistent(adapter->pcidev,
SXG_MAX_RSS_TABLE_SIZE,
adapter->IndirectionTable,
adapter->PIndirectionTable);
}
SXG_FREE_PACKET_POOL(adapter->PacketPoolHandle);
SXG_FREE_BUFFER_POOL(adapter->BufferPoolHandle);
// Unmap register spaces
SxgUnmapResources(adapter);
// Deregister DMA
if (adapter->DmaHandle) {
SXG_DEREGISTER_DMA(adapter->DmaHandle);
}
// Deregister interrupt
SxgDeregisterInterrupt(adapter);
// Possibly free system info (5.2 only)
SXG_RELEASE_SYSTEM_INFO(adapter);
SxgDiagFreeResources(adapter);
SxgFreeMCastAddrs(adapter);
if (SXG_TIMER_ALLOCATED(adapter->ResetTimer)) {
SXG_CANCEL_TIMER(adapter->ResetTimer, TimerCancelled);
SXG_FREE_TIMER(adapter->ResetTimer);
}
if (SXG_TIMER_ALLOCATED(adapter->RssTimer)) {
SXG_CANCEL_TIMER(adapter->RssTimer, TimerCancelled);
SXG_FREE_TIMER(adapter->RssTimer);
}
if (SXG_TIMER_ALLOCATED(adapter->OffloadTimer)) {
SXG_CANCEL_TIMER(adapter->OffloadTimer, TimerCancelled);
SXG_FREE_TIMER(adapter->OffloadTimer);
}
adapter->BasicAllocations = FALSE;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFreeRes",
adapter, adapter->MaxTcbs, 0, 0);
}
#endif
/*
* sxg_allocate_complete -
*
* This routine is called when a memory allocation has completed.
*
* Arguments -
* p_adapter_t - Our adapter structure
* VirtualAddress - Memory virtual address
* PhysicalAddress - Memory physical address
* Length - Length of memory allocated (or 0)
* Context - The type of buffer allocated
*
* Return
* None.
*/
static void sxg_allocate_complete(p_adapter_t adapter,
void *VirtualAddress,
dma_addr_t PhysicalAddress,
u32 Length, SXG_BUFFER_TYPE Context)
{
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocCmp",
adapter, VirtualAddress, Length, Context);
ASSERT(adapter->AllocationsPending);
--adapter->AllocationsPending;
switch (Context) {
case SXG_BUFFER_TYPE_RCV:
sxg_allocate_rcvblock_complete(adapter,
VirtualAddress,
PhysicalAddress, Length);
break;
case SXG_BUFFER_TYPE_SGL:
sxg_allocate_sgl_buffer_complete(adapter,
(PSXG_SCATTER_GATHER)
VirtualAddress,
PhysicalAddress, Length);
break;
}
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocCmp",
adapter, VirtualAddress, Length, Context);
}
/*
* sxg_allocate_buffer_memory - Shared memory allocation routine used for
* synchronous and asynchronous buffer allocations
*
* Arguments -
* adapter - A pointer to our adapter structure
* Size - block size to allocate
* BufferType - Type of buffer to allocate
*
* Return
* int
*/
static int sxg_allocate_buffer_memory(p_adapter_t adapter,
u32 Size, SXG_BUFFER_TYPE BufferType)
{
int status;
void * Buffer;
dma_addr_t pBuffer;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AllocMem",
adapter, Size, BufferType, 0);
// Grab the adapter lock and check the state.
// If we're in anything other than INITIALIZING or
// RUNNING state, fail. This is to prevent
// allocations in an improper driver state
spin_lock(&adapter->AdapterLock);
// Increment the AllocationsPending count while holding
// the lock. Pause processing relies on this
++adapter->AllocationsPending;
spin_unlock(&adapter->AdapterLock);
// At initialization time allocate resources synchronously.
Buffer = pci_alloc_consistent(adapter->pcidev, Size, &pBuffer);
if (Buffer == NULL) {
spin_lock(&adapter->AdapterLock);
// Decrement the AllocationsPending count while holding
// the lock. Pause processing relies on this
--adapter->AllocationsPending;
spin_unlock(&adapter->AdapterLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlcMemF1",
adapter, Size, BufferType, 0);
return (STATUS_RESOURCES);
}
sxg_allocate_complete(adapter, Buffer, pBuffer, Size, BufferType);
status = STATUS_SUCCESS;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlocMem",
adapter, Size, BufferType, status);
return (status);
}
/*
* sxg_allocate_rcvblock_complete - Complete a receive descriptor block allocation
*
* Arguments -
* adapter - A pointer to our adapter structure
* RcvBlock - receive block virtual address
* PhysicalAddress - Physical address
* Length - Memory length
*
* Return
*
*/
static void sxg_allocate_rcvblock_complete(p_adapter_t adapter,
void * RcvBlock,
dma_addr_t PhysicalAddress, u32 Length)
{
u32 i;
u32 BufferSize = adapter->ReceiveBufferSize;
u64 Paddr;
PSXG_RCV_BLOCK_HDR RcvBlockHdr;
unsigned char *RcvDataBuffer;
PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
PSXG_RCV_DESCRIPTOR_BLOCK RcvDescriptorBlock;
PSXG_RCV_DESCRIPTOR_BLOCK_HDR RcvDescriptorBlockHdr;
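// A receive block is one contiguous DMA allocation that contains
// SXG_RCV_DESCRIPTORS_PER_BLOCK data buffers plus their headers, a
// descriptor block, and a block header, each located within the
// allocation via the corresponding SXG_RCV_*_OFFSET macro.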
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlRcvBlk",
adapter, RcvBlock, Length, 0);
if (RcvBlock == NULL) {
goto fail;
}
memset(RcvBlock, 0, Length);
ASSERT((BufferSize == SXG_RCV_DATA_BUFFER_SIZE) ||
(BufferSize == SXG_RCV_JUMBO_BUFFER_SIZE));
ASSERT(Length == SXG_RCV_BLOCK_SIZE(BufferSize));
// First, initialize the contained pool of receive data
// buffers. This initialization requires NBL/NB/MDL allocations,
// If any of them fail, free the block and return without
// queueing the shared memory
RcvDataBuffer = RcvBlock;
#if 0
for (i = 0, Paddr = *PhysicalAddress;
i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
i++, Paddr.LowPart += BufferSize, RcvDataBuffer += BufferSize)
#endif
for (i = 0, Paddr = PhysicalAddress;
i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
//
RcvDataBufferHdr =
(PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
SXG_RCV_DATA_BUFFER_HDR_OFFSET
(BufferSize));
RcvDataBufferHdr->VirtualAddress = RcvDataBuffer;
RcvDataBufferHdr->PhysicalAddress = Paddr;
RcvDataBufferHdr->State = SXG_BUFFER_UPSTREAM; // For FREE macro assertion
RcvDataBufferHdr->Size =
SXG_RCV_BUFFER_DATA_SIZE(BufferSize);
SXG_ALLOCATE_RCV_PACKET(adapter, RcvDataBufferHdr);
if (RcvDataBufferHdr->SxgDumbRcvPacket == NULL)
goto fail;
}
// Place this entire block of memory on the AllRcvBlocks queue so it can be
// freed later
RcvBlockHdr =
(PSXG_RCV_BLOCK_HDR) ((unsigned char *)RcvBlock +
SXG_RCV_BLOCK_HDR_OFFSET(BufferSize));
RcvBlockHdr->VirtualAddress = RcvBlock;
RcvBlockHdr->PhysicalAddress = PhysicalAddress;
spin_lock(&adapter->RcvQLock);
adapter->AllRcvBlockCount++;
InsertTailList(&adapter->AllRcvBlocks, &RcvBlockHdr->AllList);
spin_unlock(&adapter->RcvQLock);
// Now free the contained receive data buffers that we initialized above
RcvDataBuffer = RcvBlock;
for (i = 0, Paddr = PhysicalAddress;
i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
i++, Paddr += BufferSize, RcvDataBuffer += BufferSize) {
RcvDataBufferHdr = (PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
SXG_RCV_DATA_BUFFER_HDR_OFFSET
(BufferSize));
spin_lock(&adapter->RcvQLock);
SXG_FREE_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
spin_unlock(&adapter->RcvQLock);
}
// Locate the descriptor block and put it on a separate free queue
RcvDescriptorBlock = (PSXG_RCV_DESCRIPTOR_BLOCK) ((unsigned char *)RcvBlock +
SXG_RCV_DESCRIPTOR_BLOCK_OFFSET
(BufferSize));
RcvDescriptorBlockHdr =
(PSXG_RCV_DESCRIPTOR_BLOCK_HDR) ((unsigned char *)RcvBlock +
SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET
(BufferSize));
RcvDescriptorBlockHdr->VirtualAddress = RcvDescriptorBlock;
RcvDescriptorBlockHdr->PhysicalAddress = Paddr;
spin_lock(&adapter->RcvQLock);
SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter, RcvDescriptorBlockHdr);
spin_unlock(&adapter->RcvQLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlRBlk",
adapter, RcvBlock, Length, 0);
return;
fail:
// Free any allocated resources
if (RcvBlock) {
RcvDataBuffer = RcvBlock;
for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK;
i++, RcvDataBuffer += BufferSize) {
RcvDataBufferHdr =
(PSXG_RCV_DATA_BUFFER_HDR) (RcvDataBuffer +
SXG_RCV_DATA_BUFFER_HDR_OFFSET
(BufferSize));
SXG_FREE_RCV_PACKET(RcvDataBufferHdr);
}
pci_free_consistent(adapter->pcidev,
Length, RcvBlock, PhysicalAddress);
}
DBG_ERROR("%s: OUT OF RESOURCES\n", __FUNCTION__);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_IMPORTANT, "RcvAFail",
adapter, adapter->FreeRcvBufferCount,
adapter->FreeRcvBlockCount, adapter->AllRcvBlockCount);
adapter->Stats.NoMem++;
}
/*
* sxg_allocate_sgl_buffer_complete - Complete a SGL buffer allocation
*
* Arguments -
* adapter - A pointer to our adapter structure
* SxgSgl - SXG_SCATTER_GATHER buffer
* PhysicalAddress - Physical address
* Length - Memory length
*
* Return
*
*/
static void sxg_allocate_sgl_buffer_complete(p_adapter_t adapter,
PSXG_SCATTER_GATHER SxgSgl,
dma_addr_t PhysicalAddress, u32 Length)
{
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "AlSglCmp",
adapter, SxgSgl, Length, 0);
spin_lock(&adapter->SglQLock);
adapter->AllSglBufferCount++;
memset(SxgSgl, 0, sizeof(SXG_SCATTER_GATHER));
SxgSgl->PhysicalAddress = PhysicalAddress; /* *PhysicalAddress; */
SxgSgl->adapter = adapter; // Initialize backpointer once
InsertTailList(&adapter->AllSglBuffers, &SxgSgl->AllList);
spin_unlock(&adapter->SglQLock);
SxgSgl->State = SXG_BUFFER_BUSY;
SXG_FREE_SGL_BUFFER(adapter, SxgSgl, NULL);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XAlSgl",
adapter, SxgSgl, Length, 0);
}
static unsigned char temp_mac_address[6] = { 0x00, 0xab, 0xcd, 0xef, 0x12, 0x69 };
static void sxg_adapter_set_hwaddr(p_adapter_t adapter)
{
// DBG_ERROR ("%s ENTER card->config_set[%x] port[%d] physport[%d] funct#[%d]\n", __FUNCTION__,
// card->config_set, adapter->port, adapter->physport, adapter->functionnumber);
//
// sxg_dbg_macaddrs(adapter);
memcpy(adapter->macaddr, temp_mac_address, sizeof(SXG_CONFIG_MAC));
// DBG_ERROR ("%s AFTER copying from config.macinfo into currmacaddr\n", __FUNCTION__);
// sxg_dbg_macaddrs(adapter);
if (!(adapter->currmacaddr[0] ||
adapter->currmacaddr[1] ||
adapter->currmacaddr[2] ||
adapter->currmacaddr[3] ||
adapter->currmacaddr[4] || adapter->currmacaddr[5])) {
memcpy(adapter->currmacaddr, adapter->macaddr, 6);
}
if (adapter->netdev) {
memcpy(adapter->netdev->dev_addr, adapter->currmacaddr, 6);
}
// DBG_ERROR ("%s EXIT port %d\n", __FUNCTION__, adapter->port);
sxg_dbg_macaddrs(adapter);
}
static int sxg_mac_set_address(p_net_device dev, void * ptr)
{
#if XXXTODO
p_adapter_t adapter = (p_adapter_t) netdev_priv(dev);
struct sockaddr *addr = ptr;
DBG_ERROR("%s ENTER (%s)\n", __FUNCTION__, adapter->netdev->name);
if (netif_running(dev)) {
return -EBUSY;
}
if (!adapter) {
return -EBUSY;
}
DBG_ERROR("sxg: %s (%s) curr %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
__FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
adapter->currmacaddr[1], adapter->currmacaddr[2],
adapter->currmacaddr[3], adapter->currmacaddr[4],
adapter->currmacaddr[5]);
memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
memcpy(adapter->currmacaddr, addr->sa_data, dev->addr_len);
DBG_ERROR("sxg: %s (%s) new %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n",
__FUNCTION__, adapter->netdev->name, adapter->currmacaddr[0],
adapter->currmacaddr[1], adapter->currmacaddr[2],
adapter->currmacaddr[3], adapter->currmacaddr[4],
adapter->currmacaddr[5]);
sxg_config_set(adapter, TRUE);
#endif
return 0;
}
/*****************************************************************************/
/************* SXG DRIVER FUNCTIONS (below) ********************************/
/*****************************************************************************/
/*
* sxg_initialize_adapter - Initialize adapter
*
* Arguments -
* adapter - A pointer to our adapter structure
*
* Return
* int
*/
static int sxg_initialize_adapter(p_adapter_t adapter)
{
u32 RssIds, IsrCount;
u32 i;
int status;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "InitAdpt",
adapter, 0, 0, 0);
RssIds = 1; // XXXTODO SXG_RSS_CPU_COUNT(adapter);
IsrCount = adapter->MsiEnabled ? RssIds : 1;
// Sanity check SXG_UCODE_REGS structure definition to
// make sure the length is correct
ASSERT(sizeof(SXG_UCODE_REGS) == SXG_REGISTER_SIZE_PER_CPU);
// Disable interrupts
SXG_DISABLE_ALL_INTERRUPTS(adapter);
// Set MTU
ASSERT((adapter->FrameSize == ETHERMAXFRAME) ||
(adapter->FrameSize == JUMBOMAXFRAME));
WRITE_REG(adapter->UcodeRegs[0].LinkMtu, adapter->FrameSize, TRUE);
// Set event ring base address and size
WRITE_REG64(adapter,
adapter->UcodeRegs[0].EventBase, adapter->PEventRings, 0);
WRITE_REG(adapter->UcodeRegs[0].EventSize, EVENT_RING_SIZE, TRUE);
// Per-ISR initialization
for (i = 0; i < IsrCount; i++) {
u64 Addr;
// Set interrupt status pointer
Addr = adapter->PIsr + (i * sizeof(u32));
WRITE_REG64(adapter, adapter->UcodeRegs[i].Isp, Addr, i);
}
// XMT ring zero index
WRITE_REG64(adapter,
adapter->UcodeRegs[0].SPSendIndex,
adapter->PXmtRingZeroIndex, 0);
// Per-RSS initialization
for (i = 0; i < RssIds; i++) {
// Release all event ring entries to the Microcode
WRITE_REG(adapter->UcodeRegs[i].EventRelease, EVENT_RING_SIZE,
TRUE);
}
// Transmit ring base and size
WRITE_REG64(adapter,
adapter->UcodeRegs[0].XmtBase, adapter->PXmtRings, 0);
WRITE_REG(adapter->UcodeRegs[0].XmtSize, SXG_XMT_RING_SIZE, TRUE);
// Receive ring base and size
WRITE_REG64(adapter,
adapter->UcodeRegs[0].RcvBase, adapter->PRcvRings, 0);
WRITE_REG(adapter->UcodeRegs[0].RcvSize, SXG_RCV_RING_SIZE, TRUE);
// Populate the card with receive buffers
sxg_stock_rcv_buffers(adapter);
// Initialize checksum offload capabilities. At the moment
// we always enable IP and TCP receive checksums on the card.
// Depending on the checksum configuration specified by the
// user, we can choose to report or ignore the checksum
// information provided by the card.
WRITE_REG(adapter->UcodeRegs[0].ReceiveChecksum,
SXG_RCV_TCP_CSUM_ENABLED | SXG_RCV_IP_CSUM_ENABLED, TRUE);
// Initialize the MAC, XAUI
DBG_ERROR("sxg: %s ENTER sxg_initialize_link\n", __FUNCTION__);
status = sxg_initialize_link(adapter);
DBG_ERROR("sxg: %s EXIT sxg_initialize_link status[%x]\n", __FUNCTION__,
status);
if (status != STATUS_SUCCESS) {
return (status);
}
// Initialize Dead to FALSE.
// SlicCheckForHang or SlicDumpThread will take it from here.
adapter->Dead = FALSE;
adapter->PingOutstanding = FALSE;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XInit",
adapter, 0, 0, 0);
return (STATUS_SUCCESS);
}
/*
* sxg_fill_descriptor_block - Populate a descriptor block and give it to
* the card. The caller should hold the RcvQLock
*
* Arguments -
* adapter - A pointer to our adapter structure
* RcvDescriptorBlockHdr - Descriptor block to fill
*
* Return
* status
*/
static int sxg_fill_descriptor_block(p_adapter_t adapter,
PSXG_RCV_DESCRIPTOR_BLOCK_HDR RcvDescriptorBlockHdr)
{
u32 i;
PSXG_RING_INFO RcvRingInfo = &adapter->RcvRingZeroInfo;
PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
PSXG_RCV_DESCRIPTOR_BLOCK RcvDescriptorBlock;
PSXG_CMD RingDescriptorCmd;
PSXG_RCV_RING RingZero = &adapter->RcvRings[0];
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "FilBlk",
adapter, adapter->RcvBuffersOnCard,
adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
ASSERT(RcvDescriptorBlockHdr);
// If we don't have the resources to fill the descriptor block,
// return failure
if ((adapter->FreeRcvBufferCount < SXG_RCV_DESCRIPTORS_PER_BLOCK) ||
SXG_RING_FULL(RcvRingInfo)) {
adapter->Stats.NoMem++;
return (STATUS_FAILURE);
}
// Get a ring descriptor command
SXG_GET_CMD(RingZero,
RcvRingInfo, RingDescriptorCmd, RcvDescriptorBlockHdr);
ASSERT(RingDescriptorCmd);
RcvDescriptorBlockHdr->State = SXG_BUFFER_ONCARD;
RcvDescriptorBlock =
(PSXG_RCV_DESCRIPTOR_BLOCK) RcvDescriptorBlockHdr->VirtualAddress;
// Fill in the descriptor block
for (i = 0; i < SXG_RCV_DESCRIPTORS_PER_BLOCK; i++) {
SXG_GET_RCV_DATA_BUFFER(adapter, RcvDataBufferHdr);
ASSERT(RcvDataBufferHdr);
SXG_REINIATIALIZE_PACKET(RcvDataBufferHdr->SxgDumbRcvPacket);
RcvDataBufferHdr->State = SXG_BUFFER_ONCARD;
RcvDescriptorBlock->Descriptors[i].VirtualAddress = (void *)RcvDataBufferHdr;
RcvDescriptorBlock->Descriptors[i].PhysicalAddress =
RcvDataBufferHdr->PhysicalAddress;
}
// Add the descriptor block to receive descriptor ring 0
RingDescriptorCmd->Sgl = RcvDescriptorBlockHdr->PhysicalAddress;
// RcvBuffersOnCard is not protected via the receive lock (see
// sxg_process_event_queue). We don't want to grab a lock every time a
// buffer is returned to us, so we use atomic interlocked functions
// instead.
adapter->RcvBuffersOnCard += SXG_RCV_DESCRIPTORS_PER_BLOCK;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DscBlk",
RcvDescriptorBlockHdr,
RingDescriptorCmd, RcvRingInfo->Head, RcvRingInfo->Tail);
WRITE_REG(adapter->UcodeRegs[0].RcvCmd, 1, true);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlk",
adapter, adapter->RcvBuffersOnCard,
adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
return (STATUS_SUCCESS);
}
/*
* sxg_stock_rcv_buffers - Stock the card with receive buffers
*
* Arguments -
* adapter - A pointer to our adapter structure
*
* Return
* None
*/
static void sxg_stock_rcv_buffers(p_adapter_t adapter)
{
PSXG_RCV_DESCRIPTOR_BLOCK_HDR RcvDescriptorBlockHdr;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "StockBuf",
adapter, adapter->RcvBuffersOnCard,
adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
// First, see if we've got less than our minimum threshold of
// receive buffers, there isn't an allocation in progress, and
// we haven't exceeded our maximum.. get another block of buffers
// None of this needs to be SMP safe. It's round numbers.
if ((adapter->FreeRcvBufferCount < SXG_MIN_RCV_DATA_BUFFERS) &&
(adapter->AllRcvBlockCount < SXG_MAX_RCV_BLOCKS) &&
(adapter->AllocationsPending == 0)) {
sxg_allocate_buffer_memory(adapter,
SXG_RCV_BLOCK_SIZE(adapter->
ReceiveBufferSize),
SXG_BUFFER_TYPE_RCV);
}
// Now grab the RcvQLock lock and proceed
spin_lock(&adapter->RcvQLock);
while (adapter->RcvBuffersOnCard < SXG_RCV_DATA_BUFFERS) {
PLIST_ENTRY _ple;
// Get a descriptor block
RcvDescriptorBlockHdr = NULL;
if (adapter->FreeRcvBlockCount) {
_ple = RemoveHeadList(&adapter->FreeRcvBlocks);
RcvDescriptorBlockHdr = container_of(_ple, SXG_RCV_DESCRIPTOR_BLOCK_HDR, FreeList);
adapter->FreeRcvBlockCount--;
RcvDescriptorBlockHdr->State = SXG_BUFFER_BUSY;
}
if (RcvDescriptorBlockHdr == NULL) {
// Bail out..
adapter->Stats.NoMem++;
break;
}
// Fill in the descriptor block and give it to the card
if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
STATUS_FAILURE) {
// Free the descriptor block
SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
RcvDescriptorBlockHdr);
break;
}
}
spin_unlock(&adapter->RcvQLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XFilBlks",
adapter, adapter->RcvBuffersOnCard,
adapter->FreeRcvBufferCount, adapter->AllRcvBlockCount);
}
/*
* sxg_complete_descriptor_blocks - Return descriptor blocks that have been
* completed by the microcode
*
* Arguments -
* adapter - A pointer to our adapter structure
* Index - Where the microcode is up to
*
* Return
* None
*/
static void sxg_complete_descriptor_blocks(p_adapter_t adapter, unsigned char Index)
{
PSXG_RCV_RING RingZero = &adapter->RcvRings[0];
PSXG_RING_INFO RcvRingInfo = &adapter->RcvRingZeroInfo;
PSXG_RCV_DESCRIPTOR_BLOCK_HDR RcvDescriptorBlockHdr;
PSXG_CMD RingDescriptorCmd;
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlks",
adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
// Now grab the RcvQLock lock and proceed
spin_lock(&adapter->RcvQLock);
ASSERT(Index != RcvRingInfo->Tail);
while (RcvRingInfo->Tail != Index) {
//
// Locate the current Cmd (ring descriptor entry), and
// associated receive descriptor block, and advance
// the tail
//
SXG_RETURN_CMD(RingZero,
RcvRingInfo,
RingDescriptorCmd, RcvDescriptorBlockHdr);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "CmpRBlk",
RcvRingInfo->Head, RcvRingInfo->Tail,
RingDescriptorCmd, RcvDescriptorBlockHdr);
// Clear the SGL field
RingDescriptorCmd->Sgl = 0;
// Attempt to refill it and hand it right back to the
// card. If we fail to refill it, free the descriptor block
// header. The card will be restocked later via the
// RcvBuffersOnCard test
if (sxg_fill_descriptor_block(adapter, RcvDescriptorBlockHdr) ==
STATUS_FAILURE) {
SXG_FREE_RCV_DESCRIPTOR_BLOCK(adapter,
RcvDescriptorBlockHdr);
}
}
spin_unlock(&adapter->RcvQLock);
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "XCRBlks",
adapter, Index, RcvRingInfo->Head, RcvRingInfo->Tail);
}
static struct pci_driver sxg_driver = {
.name = DRV_NAME,
.id_table = sxg_pci_tbl,
.probe = sxg_entry_probe,
.remove = sxg_entry_remove,
#if SXG_POWER_MANAGEMENT_ENABLED
.suspend = sxgpm_suspend,
.resume = sxgpm_resume,
#endif
/* .shutdown = slic_shutdown, MOOK_INVESTIGATE */
};
static int __init sxg_module_init(void)
{
sxg_init_driver();
if (debug >= 0)
sxg_debug = debug;
return pci_register_driver(&sxg_driver);
}
static void __exit sxg_module_cleanup(void)
{
pci_unregister_driver(&sxg_driver);
}
module_init(sxg_module_init);
module_exit(sxg_module_cleanup);
/**************************************************************************
*
* Copyright 2000-2008 Alacritech, Inc. All rights reserved.
*
* $Id: sxg.h,v 1.3 2008/07/24 17:25:08 chris Exp $
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of Alacritech, Inc.
*
**************************************************************************/
/*
* FILENAME: sxg.h
*
* This is the base set of header definitions for the SXG driver.
*/
#ifndef __SXG_DRIVER_H__
#define __SXG_DRIVER_H__
#define p_net_device struct net_device *
// SXG_STATS - Probably move these to someplace where
// the slicstat (sxgstat?) program can get them.
typedef struct _SXG_STATS {
// Xmt
u32 XmtNBL; // Offload send NBL count
u64 DumbXmtBytes; // Dumbnic send bytes
u64 SlowXmtBytes; // Slowpath send bytes
u64 FastXmtBytes; // Fastpath send bytes
u64 DumbXmtPkts; // Dumbnic send packets
u64 SlowXmtPkts; // Slowpath send packets
u64 FastXmtPkts; // Fastpath send packets
u64 DumbXmtUcastPkts; // directed packets
u64 DumbXmtMcastPkts; // Multicast packets
u64 DumbXmtBcastPkts; // OID_GEN_BROADCAST_FRAMES_RCV
u64 DumbXmtUcastBytes; // OID_GEN_DIRECTED_BYTES_XMIT
u64 DumbXmtMcastBytes; // OID_GEN_MULTICAST_BYTES_XMIT
u64 DumbXmtBcastBytes; // OID_GEN_BROADCAST_BYTES_XMIT
u64 XmtErrors; // OID_GEN_XMIT_ERROR
u64 XmtDiscards; // OID_GEN_XMIT_DISCARDS
u64 XmtOk; // OID_GEN_XMIT_OK
u64 XmtQLen; // OID_GEN_TRANSMIT_QUEUE_LENGTH
u64 XmtZeroFull; // Transmit ring zero full
// Rcv
u32 RcvNBL; // Offload receive NBL count
u64 DumbRcvBytes; // dumbnic recv bytes
u64 DumbRcvUcastBytes; // OID_GEN_DIRECTED_BYTES_RCV
u64 DumbRcvMcastBytes; // OID_GEN_MULTICAST_BYTES_RCV
u64 DumbRcvBcastBytes; // OID_GEN_BROADCAST_BYTES_RCV
u64 SlowRcvBytes; // Slowpath recv bytes
u64 FastRcvBytes; // Fastpath recv bytes
u64 DumbRcvPkts; // OID_GEN_DIRECTED_FRAMES_RCV
u64 DumbRcvTcpPkts; // See SxgCollectStats
u64 DumbRcvUcastPkts; // directed packets
u64 DumbRcvMcastPkts; // Multicast packets
u64 DumbRcvBcastPkts; // OID_GEN_BROADCAST_FRAMES_RCV
u64 SlowRcvPkts; // OID_GEN_DIRECTED_FRAMES_RCV
u64 RcvErrors; // OID_GEN_RCV_ERROR
u64 RcvDiscards; // OID_GEN_RCV_DISCARDS
u64 RcvNoBuffer; // OID_GEN_RCV_NO_BUFFER
u64 PdqFull; // Processed Data Queue Full
u64 EventRingFull; // Event ring full
// Verbose stats
u64 MaxSends; // Max sends outstanding
u64 NoSglBuf; // SGL buffer allocation failure
u64 SglFail; // NDIS SGL failure
u64 SglAsync; // NDIS SGL failure
u64 NoMem; // Memory allocation failure
u64 NumInts; // Interrupts
u64 FalseInts; // Interrupt with ISR == 0
u64 XmtDrops; // No sahara DRAM buffer for xmt
// Sahara receive status
u64 TransportCsum; // SXG_RCV_STATUS_TRANSPORT_CSUM
u64 TransportUflow; // SXG_RCV_STATUS_TRANSPORT_UFLOW
u64 TransportHdrLen; // SXG_RCV_STATUS_TRANSPORT_HDRLEN
u64 NetworkCsum; // SXG_RCV_STATUS_NETWORK_CSUM:
u64 NetworkUflow; // SXG_RCV_STATUS_NETWORK_UFLOW:
u64 NetworkHdrLen; // SXG_RCV_STATUS_NETWORK_HDRLEN:
u64 Parity; // SXG_RCV_STATUS_PARITY
u64 LinkParity; // SXG_RCV_STATUS_LINK_PARITY:
u64 LinkEarly; // SXG_RCV_STATUS_LINK_EARLY:
u64 LinkBufOflow; // SXG_RCV_STATUS_LINK_BUFOFLOW:
u64 LinkCode; // SXG_RCV_STATUS_LINK_CODE:
u64 LinkDribble; // SXG_RCV_STATUS_LINK_DRIBBLE:
u64 LinkCrc; // SXG_RCV_STATUS_LINK_CRC:
u64 LinkOflow; // SXG_RCV_STATUS_LINK_OFLOW:
u64 LinkUflow; // SXG_RCV_STATUS_LINK_UFLOW:
} SXG_STATS, *PSXG_STATS;
/****************************************************************************
* DUMB-NIC Send path definitions
****************************************************************************/
#define SXG_COMPLETE_DUMB_SEND(_pAdapt, _skb) { \
ASSERT(_skb); \
dev_kfree_skb_irq(_skb); \
}
#define SXG_DROP_DUMB_SEND(_pAdapt, _skb) { \
ASSERT(_skb); \
dev_kfree_skb(_skb); \
}
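// Note: SXG_COMPLETE_DUMB_SEND uses dev_kfree_skb_irq() because send
// completions arrive in interrupt context; SXG_DROP_DUMB_SEND uses plain
// dev_kfree_skb(), which is only safe outside hard-interrupt context
// (the transmit drop path).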
// Locate current receive header buffer location. Use this
// instead of RcvDataHdr->VirtualAddress since the data
// may have been offset by SXG_ADVANCE_MDL_OFFSET
#define SXG_RECEIVE_DATA_LOCATION(_RcvDataHdr) (_RcvDataHdr)->skb->data
/************************************************************************
* Dumb-NIC receive processing
************************************************************************/
// Define an SXG_PACKET as an NDIS_PACKET
#define PSXG_PACKET struct sk_buff *
// Indications array size
#define SXG_RCV_ARRAYSIZE 64
#define SXG_ALLOCATE_RCV_PACKET(_pAdapt, _RcvDataBufferHdr) { \
struct sk_buff * skb; \
skb = alloc_skb(2048, GFP_ATOMIC); \
if (skb) { \
(_RcvDataBufferHdr)->skb = skb; \
skb->next = NULL; \
} else { \
(_RcvDataBufferHdr)->skb = NULL; \
} \
}
#define SXG_FREE_RCV_PACKET(_RcvDataBufferHdr) { \
if((_RcvDataBufferHdr)->skb) { \
dev_kfree_skb((_RcvDataBufferHdr)->skb); \
} \
}
// Macro to add an NDIS_PACKET to an indication array
// If we fill up our array of packet pointers, then indicate this
// block up now and start on a new one.
#define SXG_ADD_RCV_PACKET(_pAdapt, _Packet, _PrevPacket, _IndicationList, _NumPackets) { \
(_IndicationList)[_NumPackets] = (_Packet); \
(_NumPackets)++; \
if((_NumPackets) == SXG_RCV_ARRAYSIZE) { \
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \
(_NumPackets), 0, 0, 0); \
netif_rx((_IndicationList),(_NumPackets)); \
(_NumPackets) = 0; \
} \
}
#define SXG_INDICATE_PACKETS(_pAdapt, _IndicationList, _NumPackets) { \
if(_NumPackets) { \
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "IndicRcv", \
(_NumPackets), 0, 0, 0); \
netif_rx((_IndicationList),(_NumPackets)); \
(_NumPackets) = 0; \
} \
}
#define SXG_REINIATIALIZE_PACKET(_Packet) \
{} /*_NdisReinitializePacket(_Packet)*/ /* this is not necessary with an skb */
// Definitions to initialize Dumb-nic Receive NBLs
#define SXG_RCV_PACKET_BUFFER_HDR(_Packet) (((PSXG_RCV_NBL_RESERVED)((_Packet)->MiniportReservedEx))->RcvDataBufferHdr)
#define SXG_RCV_SET_CHECKSUM_INFO(_Packet, _Cpi) \
NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), TcpIpChecksumPacketInfo) = (PVOID)(_Cpi)
#define SXG_RCV_SET_TOEPLITZ(_Packet, _Toeplitz, _Type, _Function) { \
NDIS_PACKET_SET_HASH_VALUE((_Packet), (_Toeplitz)); \
NDIS_PACKET_SET_HASH_TYPE((_Packet), (_Type)); \
NDIS_PACKET_SET_HASH_FUNCTION((_Packet), (_Function)); \
}
#define SXG_RCV_SET_VLAN_INFO(_Packet, _VlanId, _Priority) { \
NDIS_PACKET_8021Q_INFO _Packet8021qInfo; \
_Packet8021qInfo.TagHeader.VlanId = (_VlanId); \
_Packet8021qInfo.TagHeader.UserPriority = (_Priority); \
NDIS_PER_PACKET_INFO_FROM_PACKET((_Packet), Ieee8021QNetBufferListInfo) = \
_Packet8021qInfo.Value; \
}
#define SXG_ADJUST_RCV_PACKET(_Packet, _RcvDataBufferHdr, _Event) { \
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "DumbRcv", \
(_RcvDataBufferHdr), (_Packet), \
(_Event)->Status, 0); \
ASSERT((_Event)->Length <= (_RcvDataBufferHdr)->Size); \
(_Packet)->len = (_Event)->Length; \
}
///////////////////////////////////////////////////////////////////////////////
// Macros to free a receive data buffer and receive data descriptor block
///////////////////////////////////////////////////////////////////////////////
// NOTE - Lock must be held with RCV macros
#define SXG_GET_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \
PLIST_ENTRY _ple; \
_Hdr = NULL; \
if((_pAdapt)->FreeRcvBufferCount) { \
ASSERT(!(IsListEmpty(&(_pAdapt)->FreeRcvBuffers))); \
_ple = RemoveHeadList(&(_pAdapt)->FreeRcvBuffers); \
(_Hdr) = container_of(_ple, SXG_RCV_DATA_BUFFER_HDR, FreeList); \
(_pAdapt)->FreeRcvBufferCount--; \
ASSERT((_Hdr)->State == SXG_BUFFER_FREE); \
} \
}
#define SXG_FREE_RCV_DATA_BUFFER(_pAdapt, _Hdr) { \
SXG_TRACE(TRACE_SXG, SxgTraceBuffer, TRACE_NOISY, "RtnDHdr", \
(_Hdr), (_pAdapt)->FreeRcvBufferCount, \
(_Hdr)->State, (_Hdr)->VirtualAddress); \
/* SXG_RESTORE_MDL_OFFSET(_Hdr); */ \
(_pAdapt)->FreeRcvBufferCount++; \
ASSERT(((_pAdapt)->AllRcvBlockCount * SXG_RCV_DESCRIPTORS_PER_BLOCK) >= (_pAdapt)->FreeRcvBufferCount); \
ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \
(_Hdr)->State = SXG_BUFFER_FREE; \
InsertTailList(&(_pAdapt)->FreeRcvBuffers, &((_Hdr)->FreeList)); \
}
#define SXG_FREE_RCV_DESCRIPTOR_BLOCK(_pAdapt, _Hdr) { \
ASSERT((_Hdr)->State != SXG_BUFFER_FREE); \
(_Hdr)->State = SXG_BUFFER_FREE; \
(_pAdapt)->FreeRcvBlockCount++; \
ASSERT((_pAdapt)->AllRcvBlockCount >= (_pAdapt)->FreeRcvBlockCount); \
InsertTailList(&(_pAdapt)->FreeRcvBlocks, &(_Hdr)->FreeList); \
}
// SGL macros
#define SXG_FREE_SGL_BUFFER(_pAdapt, _Sgl, _NB) { \
spin_lock(&(_pAdapt)->SglQLock); \
(_pAdapt)->FreeSglBufferCount++; \
ASSERT((_pAdapt)->AllSglBufferCount >= (_pAdapt)->FreeSglBufferCount);\
ASSERT(!((_Sgl)->State & SXG_BUFFER_FREE)); \
(_Sgl)->State = SXG_BUFFER_FREE; \
InsertTailList(&(_pAdapt)->FreeSglBuffers, &(_Sgl)->FreeList); \
spin_unlock(&(_pAdapt)->SglQLock); \
}
// Get an SGL buffer from the free queue. The first part of this macro
// attempts to keep ahead of buffer depletion by allocating more when
// we hit a minimum threshold. Note that we don't grab the lock
// until after that. These counts are only rough thresholds, so the check
// doesn't need the lock, and grabbing it early would mean taking it twice.
#define SXG_GET_SGL_BUFFER(_pAdapt, _Sgl) { \
PLIST_ENTRY _ple; \
if ((_pAdapt->FreeSglBufferCount < SXG_MIN_SGL_BUFFERS) && \
(_pAdapt->AllSglBufferCount < SXG_MAX_SGL_BUFFERS) && \
(_pAdapt->AllocationsPending == 0)) { \
sxg_allocate_buffer_memory(_pAdapt, \
(sizeof(SXG_SCATTER_GATHER) + SXG_SGL_BUF_SIZE),\
SXG_BUFFER_TYPE_SGL); \
} \
_Sgl = NULL; \
spin_lock(&(_pAdapt)->SglQLock); \
if((_pAdapt)->FreeSglBufferCount) { \
ASSERT(!(IsListEmpty(&(_pAdapt)->FreeSglBuffers))); \
_ple = RemoveHeadList(&(_pAdapt)->FreeSglBuffers); \
(_Sgl) = container_of(_ple, SXG_SCATTER_GATHER, FreeList); \
(_pAdapt)->FreeSglBufferCount--; \
ASSERT((_Sgl)->State == SXG_BUFFER_FREE); \
(_Sgl)->State = SXG_BUFFER_BUSY; \
(_Sgl)->pSgl = NULL; \
} \
spin_unlock(&(_pAdapt)->SglQLock); \
}
//
// SXG_MULTICAST_ADDRESS
//
// Linked list of multicast addresses.
typedef struct _SXG_MULTICAST_ADDRESS {
unsigned char Address[6];
struct _SXG_MULTICAST_ADDRESS *Next;
} SXG_MULTICAST_ADDRESS, *PSXG_MULTICAST_ADDRESS;
// Structure to maintain chimney send and receive buffer queues.
// This structure maintains NET_BUFFER_LIST queues that are
// given to us via the Chimney MiniportTcpOffloadSend and
// MiniportTcpOffloadReceive routines. This structure DOES NOT
// manage our data buffer queue
typedef struct _SXG_BUFFER_QUEUE {
u32 Type; // Slow or fast - See below
u32 Direction; // Xmt or Rcv
u32 Bytes; // Byte count
u32 * Head; // Send queue head
u32 * Tail; // Send queue tail
// PNET_BUFFER_LIST NextNBL; // Short cut - next NBL
// PNET_BUFFER NextNB; // Short cut - next NB
} SXG_BUFFER_QUEUE, *PSXG_BUFFER_QUEUE;
#define SXG_SLOW_SEND_BUFFER 0
#define SXG_FAST_SEND_BUFFER 1
#define SXG_RECEIVE_BUFFER 2
#define SXG_INIT_BUFFER(_Buffer, _Type) { \
(_Buffer)->Type = (_Type); \
if((_Type) == SXG_RECEIVE_BUFFER) { \
(_Buffer)->Direction = 0; \
} else { \
(_Buffer)->Direction = NDIS_SG_LIST_WRITE_TO_DEVICE; \
} \
(_Buffer)->Bytes = 0; \
(_Buffer)->Head = NULL; \
(_Buffer)->Tail = NULL; \
}
#define SXG_RSS_CPU_COUNT(_pAdapt) \
((_pAdapt)->RssEnabled ? NR_CPUS : 1)
/****************************************************************************
* DRIVER and ADAPTER structures
****************************************************************************/
// Adapter states - These states closely match the adapter states
// documented in the DDK (with a few exceptions).
typedef enum _SXG_STATE {
SXG_STATE_INITIALIZING, // Initializing
SXG_STATE_BOOTDIAG, // Boot-Diagnostic mode
SXG_STATE_PAUSING, // Pausing
SXG_STATE_PAUSED, // Paused
SXG_STATE_RUNNING, // Running
SXG_STATE_RESETTING, // Reset in progress
SXG_STATE_SLEEP, // Sleeping
SXG_STATE_DIAG, // Diagnostic mode
SXG_STATE_HALTING, // Halting
SXG_STATE_HALTED, // Down or not-initialized
SXG_STATE_SHUTDOWN // shutdown
} SXG_STATE, *PSXG_STATE;
// Link state
typedef enum _SXG_LINK_STATE {
SXG_LINK_DOWN,
SXG_LINK_UP
} SXG_LINK_STATE, *PSXG_LINK_STATE;
// Link initialization timeout in 100us units
#define SXG_LINK_TIMEOUT 100000 // 10 Seconds - REDUCE!
// Microcode file selection codes
typedef enum _SXG_UCODE_SEL {
SXG_UCODE_SAHARA, // Sahara ucode
SXG_UCODE_SDIAGCPU, // Sahara CPU diagnostic ucode
SXG_UCODE_SDIAGSYS // Sahara system diagnostic ucode
} SXG_UCODE_SEL;
#define SXG_DISABLE_ALL_INTERRUPTS(_padapt) sxg_disable_interrupt(_padapt)
#define SXG_ENABLE_ALL_INTERRUPTS(_padapt) sxg_enable_interrupt(_padapt)
// This probably lives in a proto.h file. Move later
#define SXG_MULTICAST_PACKET(_pether) ((_pether)->ether_dhost[0] & 0x01)
#define SXG_BROADCAST_PACKET(_pether) ((*(u32 *)(_pether)->ether_dhost == 0xFFFFFFFF) && \
(*(u16 *)&(_pether)->ether_dhost[4] == 0xFFFF))
// For DbgPrints
#define SXG_ID DPFLTR_IHVNETWORK_ID
#define SXG_ERROR DPFLTR_ERROR_LEVEL
//
// SXG_DRIVER structure -
//
// contains information about the sxg driver. There is only
// one of these, and it is defined as a global.
typedef struct _SXG_DRIVER {
struct _adapter_t *Adapters; // Linked list of adapters
ushort AdapterID; // Maintain unique adapter ID
} SXG_DRIVER, *PSXG_DRIVER;
#ifdef STATUS_SUCCESS
#undef STATUS_SUCCESS
#endif
#define STATUS_SUCCESS 0
#define STATUS_PENDING 0
#define STATUS_FAILURE -1
#define STATUS_ERROR -2
#define STATUS_NOT_SUPPORTED -3
#define STATUS_BUFFER_TOO_SHORT -4
#define STATUS_RESOURCES -5
#define SLIC_MAX_CARDS 32
#define SLIC_MAX_PORTS 4 /* Max # of ports per card */
#if SLIC_DUMP_ENABLED
// Dump buffer size
//
// This cannot be bigger than the max DMA size the card supports,
// given the current code structure in the host and ucode.
// Mojave supports 16K, Oasis supports 16K-1, so
// just set this at 15K; it shouldn't make that much of a difference.
#define DUMP_BUF_SIZE 0x3C00
#endif
#define MIN(a, b) ((u32)(a) < (u32)(b) ? (a) : (b))
#define MAX(a, b) ((u32)(a) > (u32)(b) ? (a) : (b))
typedef struct _mcast_address_t
{
unsigned char address[6];
struct _mcast_address_t *next;
} mcast_address_t, *p_mcast_address_t;
#define CARD_DOWN 0x00000000
#define CARD_UP 0x00000001
#define CARD_FAIL 0x00000002
#define CARD_DIAG 0x00000003
#define CARD_SLEEP 0x00000004
#define ADAPT_DOWN 0x00
#define ADAPT_UP 0x01
#define ADAPT_FAIL 0x02
#define ADAPT_RESET 0x03
#define ADAPT_SLEEP 0x04
#define ADAPT_FLAGS_BOOTTIME 0x0001
#define ADAPT_FLAGS_IS64BIT 0x0002
#define ADAPT_FLAGS_PENDINGLINKDOWN 0x0004
#define ADAPT_FLAGS_FIBERMEDIA 0x0008
#define ADAPT_FLAGS_LOCKS_ALLOCED 0x0010
#define ADAPT_FLAGS_INT_REGISTERED 0x0020
#define ADAPT_FLAGS_LOAD_TIMER_SET 0x0040
#define ADAPT_FLAGS_STATS_TIMER_SET 0x0080
#define ADAPT_FLAGS_RESET_TIMER_SET 0x0100
#define LINK_DOWN 0x00
#define LINK_CONFIG 0x01
#define LINK_UP 0x02
#define LINK_10MB 0x00
#define LINK_100MB 0x01
#define LINK_AUTOSPEED 0x02
#define LINK_1000MB 0x03
#define LINK_10000MB 0x04
#define LINK_HALFD 0x00
#define LINK_FULLD 0x01
#define LINK_AUTOD 0x02
#define MAC_DIRECTED 0x00000001
#define MAC_BCAST 0x00000002
#define MAC_MCAST 0x00000004
#define MAC_PROMISC 0x00000008
#define MAC_LOOPBACK 0x00000010
#define MAC_ALLMCAST 0x00000020
#define SLIC_DUPLEX(x) ((x==LINK_FULLD) ? "FDX" : "HDX")
#define SLIC_SPEED(x) ((x==LINK_100MB) ? "100Mb" : ((x==LINK_1000MB) ? "1000Mb" : " 10Mb"))
#define SLIC_LINKSTATE(x) ((x==LINK_DOWN) ? "Down" : "Up ")
#define SLIC_ADAPTER_STATE(x) ((x==ADAPT_UP) ? "UP" : "Down")
#define SLIC_CARD_STATE(x) ((x==CARD_UP) ? "UP" : "Down")
typedef struct _ether_header
{
unsigned char ether_dhost[6];
unsigned char ether_shost[6];
ushort ether_type;
} ether_header, *p_ether_header;
#define NUM_CFG_SPACES 2
#define NUM_CFG_REGS 64
typedef struct _physcard_t
{
struct _adapter_t *adapter[SLIC_MAX_PORTS];
struct _physcard_t *next;
unsigned int adapters_allocd;
} physcard_t, *p_physcard_t;
typedef struct _sxgbase_driver
{
spinlock_t driver_lock;
unsigned long flags; /* irqsave for spinlock */
u32 num_sxg_cards;
u32 num_sxg_ports;
u32 num_sxg_ports_active;
u32 dynamic_intagg;
p_physcard_t phys_card;
} sxgbase_driver_t;
typedef struct _adapter_t
{
void * ifp;
unsigned int port;
p_physcard_t physcard;
unsigned int physport;
unsigned int cardindex;
unsigned int card_size;
unsigned int chipid;
unsigned int busnumber;
unsigned int slotnumber;
unsigned int functionnumber;
ushort vendid;
ushort devid;
ushort subsysid;
u32 irq;
void * sxg_adapter;
u32 nBusySend;
void __iomem * base_addr;
u32 memorylength;
u32 drambase;
u32 dramlength;
unsigned int queues_initialized;
unsigned int allocated;
unsigned int activated;
u32 intrregistered;
unsigned int isp_initialized;
unsigned int gennumber;
u32 curaddrupper;
u32 isrcopy;
unsigned char state;
unsigned char linkstate;
unsigned char linkspeed;
unsigned char linkduplex;
unsigned int flags;
unsigned char macaddr[6];
unsigned char currmacaddr[6];
u32 macopts;
ushort devflags_prev;
u64 mcastmask;
p_mcast_address_t mcastaddrs;
struct timer_list pingtimer;
u32 pingtimerset;
struct timer_list statstimer;
u32 statstimerset;
struct timer_list vpci_timer;
u32 vpci_timerset;
struct timer_list loadtimer;
u32 loadtimerset;
u32 xmitq_full;
u32 all_reg_writes;
u32 icr_reg_writes;
u32 isr_reg_writes;
u32 error_interrupts;
u32 error_rmiss_interrupts;
u32 rx_errors;
u32 rcv_drops;
u32 rcv_interrupts;
u32 xmit_interrupts;
u32 linkevent_interrupts;
u32 upr_interrupts;
u32 num_isrs;
u32 false_interrupts;
u32 tx_packets;
u32 xmit_completes;
u32 tx_drops;
u32 rcv_broadcasts;
u32 rcv_multicasts;
u32 rcv_unicasts;
u32 max_isr_rcvs;
u32 max_isr_xmits;
u32 rcv_interrupt_yields;
u32 intagg_period;
struct net_device_stats stats;
u32 * MiniportHandle; // Our miniport handle
SXG_STATE State; // Adapter state
SXG_LINK_STATE LinkState; // Link state
u64 LinkSpeed; // Link Speed
u32 PowerState; // NDIS power state
struct _adapter_t *Next; // Linked list
ushort AdapterID; // 1..n
unsigned char MacAddr[6]; // Our permanent HW mac address
unsigned char CurrMacAddr[6]; // Our Current mac address
p_net_device netdev;
p_net_device next_netdevice;
struct pci_dev * pcidev;
PSXG_MULTICAST_ADDRESS MulticastAddrs; // Multicast list
u64 MulticastMask; // Multicast mask
u32 * InterruptHandle; // Register Interrupt handle
u32 InterruptLevel; // From Resource list
u32 InterruptVector; // From Resource list
spinlock_t AdapterLock; /* Serialize access adapter routines */
spinlock_t Bit64RegLock; /* For writing 64-bit addresses */
PSXG_HW_REGS HwRegs; // Sahara HW Register Memory (BAR0/1)
PSXG_UCODE_REGS UcodeRegs; // Microcode Register Memory (BAR2/3)
PSXG_TCB_REGS TcbRegs; // Same as Ucode regs - See sxghw.h
ushort ResetDpcCount; // For timeout
ushort RssDpcCount; // For timeout
ushort VendorID; // Vendor ID
ushort DeviceID; // Device ID
ushort SubSystemID; // Sub-System ID
ushort FrameSize; // Maximum frame size
u32 * DmaHandle; // NDIS DMA handle
u32 * PacketPoolHandle; // Used with NDIS 5.2 only. Don't ifdef out
u32 * BufferPoolHandle; // Used with NDIS 5.2 only. Don't ifdef out
u32 MacFilter; // NDIS MAC Filter
ushort IpId; // For slowpath
PSXG_EVENT_RING EventRings; // Host event rings. 1/CPU to 16 max
dma_addr_t PEventRings; // Physical address
u32 NextEvent[SXG_MAX_RSS]; // Current location in ring
dma_addr_t PTcbBuffers; // TCB Buffers - physical address
dma_addr_t PTcbCompBuffers; // TCB Composite Buffers - phys addr
PSXG_XMT_RING XmtRings; // Transmit rings
dma_addr_t PXmtRings; // Transmit rings - physical address
SXG_RING_INFO XmtRingZeroInfo; // Transmit ring 0 info
spinlock_t XmtZeroLock; /* Transmit ring 0 lock */
u32 * XmtRingZeroIndex; // Shared XMT ring 0 index
dma_addr_t PXmtRingZeroIndex; // Shared XMT ring 0 index - physical
LIST_ENTRY FreeProtocolHeaders;// Free protocol headers
u32 FreeProtoHdrCount; // Count
void * ProtocolHeaders; // Block of protocol header
dma_addr_t PProtocolHeaders; // Block of protocol headers - phys
PSXG_RCV_RING RcvRings; // Receive rings
dma_addr_t PRcvRings; // Receive rings - physical address
SXG_RING_INFO RcvRingZeroInfo; // Receive ring 0 info
u32 * Isr; // Interrupt status register
dma_addr_t PIsr; // ISR - physical address
u32 IsrCopy[SXG_MAX_RSS]; // Copy of ISR
ushort InterruptsEnabled; // Bitmask of enabled vectors
unsigned char * IndirectionTable; // RSS indirection table
dma_addr_t PIndirectionTable; // Physical address
ushort RssTableSize; // From NDIS_RECEIVE_SCALE_PARAMETERS
ushort HashKeySize; // From NDIS_RECEIVE_SCALE_PARAMETERS
unsigned char HashSecretKey[40]; // rss key
u32 HashInformation;
// Receive buffer queues
spinlock_t RcvQLock; /* Receive Queue Lock */
LIST_ENTRY FreeRcvBuffers; // Free SXG_DATA_BUFFER queue
LIST_ENTRY FreeRcvBlocks; // Free SXG_RCV_DESCRIPTOR_BLOCK Q
LIST_ENTRY AllRcvBlocks; // All SXG_RCV_BLOCKs
ushort FreeRcvBufferCount; // Number of free rcv data buffers
ushort FreeRcvBlockCount; // # of free rcv descriptor blocks
ushort AllRcvBlockCount; // Number of total receive blocks
ushort ReceiveBufferSize; // SXG_RCV_DATA/JUMBO_BUFFER_SIZE only
u32 AllocationsPending; // Receive allocation pending
u32 RcvBuffersOnCard; // SXG_DATA_BUFFERS owned by card
// SGL buffers
spinlock_t SglQLock; /* SGL Queue Lock */
LIST_ENTRY FreeSglBuffers; // Free SXG_SCATTER_GATHER
LIST_ENTRY AllSglBuffers; // All SXG_SCATTER_GATHER
ushort FreeSglBufferCount; // Number of free SGL buffers
ushort AllSglBufferCount; // Number of total SGL buffers
u32 CurrentTime; // Tick count
u32 FastpathConnections;// # of fastpath connections
// Various single-bit flags:
u32 BasicAllocations:1; // Locks and listheads
u32 IntRegistered:1; // Interrupt registered
u32 PingOutstanding:1; // Ping outstanding to card
u32 Dead:1; // Card dead
u32 DumpDriver:1; // OID_SLIC_DRIVER_DUMP request
u32 DumpCard:1; // OID_SLIC_CARD_DUMP request
u32 DumpCmdRunning:1; // Dump command in progress
u32 DebugRunning:1; // AGDB debug in progress
u32 JumboEnabled:1; // Jumbo frames enabled
u32 MsiEnabled:1; // MSI interrupt enabled
u32 RssEnabled:1; // RSS Enabled
u32 FailOnBadEeprom:1; // Fail on Bad Eeprom
u32 DiagStart:1; // Init adapter for diagnostic start
// Stats
u32 PendingRcvCount; // Outstanding rcv indications
u32 PendingXmtCount; // Outstanding send requests
SXG_STATS Stats; // Statistics
u32 ReassBufs; // Number of reassembly buffers
// Card Crash Info
ushort CrashLocation; // Microcode crash location
unsigned char CrashCpu; // Sahara CPU ID
// Diagnostics
// PDIAG_CMD DiagCmds; // List of free diagnostic commands
// PDIAG_BUFFER DiagBuffers; // List of free diagnostic buffers
// PDIAG_REQ DiagReqQ; // List of outstanding (asynchronous) diag requests
// u32 DiagCmdTimeout; // Time out for diag cmds (seconds) XXXTODO - replace with SXG_PARAM var?
// unsigned char DiagDmaDesc[DMA_CPU_CTXS]; // Free DMA descriptors bit field (32 CPU ctx * 8 DMA ctx)
/////////////////////////////////////////////////////////////////////
// Put preprocessor-conditional fields at the end so we don't
// have to recompile sxgdbg every time we reconfigure the driver
/////////////////////////////////////////////////////////////////////
void * PendingSetRss; // Pending RSS parameter change
u32 IPv4HdrSize; // Shared 5.2/6.0 encap param
unsigned char * InterruptInfo; // Allocated by us during AddDevice
#if defined(CONFIG_X86)
u32 AddrUpper; // Upper 32 bits of 64-bit register
#endif
//#if SXG_FAILURE_DUMP
// NDIS_EVENT DumpThreadEvent; // syncronize dump thread
// BOOLEAN DumpThreadRunning; // termination flag
// PSXG_DUMP_CMD DumpBuffer; // 68k - Cmd and Buffer
// dma_addr_t PDumpBuffer; // Physical address
//#endif // SXG_FAILURE_DUMP
} adapter_t, *p_adapter_t;
#if SLIC_DUMP_ENABLED
#define SLIC_DUMP_REQUESTED 1
#define SLIC_DUMP_IN_PROGRESS 2
#define SLIC_DUMP_DONE 3
/****************************************************************************
*
* Microcode crash information structure. This
* structure is written out to the card's SRAM when the microcode panics.
*
****************************************************************************/
typedef struct _slic_crash_info {
ushort cpu_id;
ushort crash_pc;
} slic_crash_info, *p_slic_crash_info;
#define CRASH_INFO_OFFSET 0x155C
#endif
#define UPDATE_STATS(largestat, newstat, oldstat) \
{ \
if ((newstat) < (oldstat)) \
(largestat) += ((newstat) + (0xFFFFFFFF - (oldstat) + 1)); \
else \
(largestat) += ((newstat) - (oldstat)); \
}
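// Worked example: if the 32-bit hardware counter wrapped between samples,
// say oldstat = 0xFFFFFFF0 and newstat = 0x00000010, UPDATE_STATS adds
// 0x10 + (0xFFFFFFFF - 0xFFFFFFF0 + 1) = 0x20, which is the true delta.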
#define UPDATE_STATS_GB(largestat, newstat, oldstat) \
{ \
(largestat) += ((newstat) - (oldstat)); \
}
#define ETHER_EQ_ADDR(_AddrA, _AddrB, _Result) \
{ \
_Result = TRUE; \
if (*(u32 *)(_AddrA) != *(u32 *)(_AddrB)) \
_Result = FALSE; \
if (*(u16 *)(&((_AddrA)[4])) != *(u16 *)(&((_AddrB)[4]))) \
_Result = FALSE; \
}
#define ETHERMAXFRAME 1514
#define JUMBOMAXFRAME 9014
#if defined(CONFIG_X86_64) || defined(CONFIG_IA64)
#define SXG_GET_ADDR_LOW(_addr) (u32)((u64)(_addr) & 0x00000000FFFFFFFF)
#define SXG_GET_ADDR_HIGH(_addr) (u32)(((u64)(_addr) >> 32) & 0x00000000FFFFFFFF)
#else
#define SXG_GET_ADDR_LOW(_addr) (u32)_addr
#define SXG_GET_ADDR_HIGH(_addr) (u32)0
#endif
#define FLUSH TRUE
#define DONT_FLUSH FALSE
#define SIOCSLICDUMPCARD SIOCDEVPRIVATE+9
#define SIOCSLICSETINTAGG SIOCDEVPRIVATE+10
#define SIOCSLICTRACEDUMP SIOCDEVPRIVATE+11
#endif /* __SXG_DRIVER_H__ */
/**************************************************************************
*
* Copyright (C) 2000-2008 Alacritech, Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of Alacritech, Inc.
*
**************************************************************************/
/*
* FILENAME: sxg_os.h
*
* These are the Linux-specific definitions required for the SLICOSS
* driver, which should allow for greater portability to other OSes.
*/
#ifndef _SLIC_OS_SPECIFIC_H_
#define _SLIC_OS_SPECIFIC_H_
#define FALSE (0)
#define TRUE (1)
typedef struct _LIST_ENTRY {
struct _LIST_ENTRY *nle_flink;
struct _LIST_ENTRY *nle_blink;
} list_entry, LIST_ENTRY, *PLIST_ENTRY;
#define InitializeListHead(l) \
(l)->nle_flink = (l)->nle_blink = (l)
#define IsListEmpty(h) \
((h)->nle_flink == (h))
#define RemoveEntryList(e) \
do { \
list_entry *b; \
list_entry *f; \
\
f = (e)->nle_flink; \
b = (e)->nle_blink; \
b->nle_flink = f; \
f->nle_blink = b; \
} while (0)
/* These two have to be inlined since they return things. */
static __inline PLIST_ENTRY
RemoveHeadList(list_entry *l)
{
list_entry *f;
list_entry *e;
e = l->nle_flink;
f = e->nle_flink;
l->nle_flink = f;
f->nle_blink = l;
return (e);
}
static __inline PLIST_ENTRY
RemoveTailList(list_entry *l)
{
list_entry *b;
list_entry *e;
e = l->nle_blink;
b = e->nle_blink;
l->nle_blink = b;
b->nle_flink = l;
return (e);
}
#define InsertTailList(l, e) \
do { \
list_entry *b; \
\
b = (l)->nle_blink; \
(e)->nle_flink = (l); \
(e)->nle_blink = b; \
b->nle_flink = (e); \
(l)->nle_blink = (e); \
} while (0)
#define InsertHeadList(l, e) \
do { \
list_entry *f; \
\
f = (l)->nle_flink; \
(e)->nle_flink = f; \
(e)->nle_blink = l; \
f->nle_blink = (e); \
(l)->nle_flink = (e); \
} while (0)
#define ATK_DEBUG 1
#if ATK_DEBUG
#define SLIC_TIMESTAMP(value) { \
struct timeval timev; \
do_gettimeofday(&timev); \
value = timev.tv_sec*1000000 + timev.tv_usec; \
}
#else
#define SLIC_TIMESTAMP(value)
#endif
/****************** SXG DEFINES *****************************************/
#ifdef ATKDBG
#define SXG_TIMESTAMP(value) { \
struct timeval timev; \
do_gettimeofday(&timev); \
value = timev.tv_sec*1000000 + timev.tv_usec; \
}
#else
#define SXG_TIMESTAMP(value)
#endif
#define WRITE_REG(reg,value,flush) sxg_reg32_write((&reg), (value), (flush))
#define WRITE_REG64(a,reg,value,cpu) sxg_reg64_write((a),(&reg),(value),(cpu))
#define READ_REG(reg,value) (value) = readl((void __iomem *)(&reg))
#endif /* _SLIC_OS_SPECIFIC_H_ */
/**************************************************************************
*
* Copyright © 2000-2008 Alacritech, Inc. All rights reserved.
*
* $Id: sxgdbg.h,v 1.1 2008/06/27 12:49:28 mook Exp $
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY ALACRITECH, INC. ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ALACRITECH, INC. OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The views and conclusions contained in the software and documentation
* are those of the authors and should not be interpreted as representing
* official policies, either expressed or implied, of Alacritech, Inc.
*
**************************************************************************/
/*
* FILENAME: sxgdbg.h
*
* All debug and assertion-based definitions and macros are included
* in this file for the SXGOSS driver.
*/
#ifndef _SXG_DEBUG_H_
#define _SXG_DEBUG_H_
#define ATKDBG 1
#define ATK_TRACE_ENABLED 1
#define DBG_ERROR(n, args...) printk(KERN_EMERG n, ##args)
#ifdef ASSERT
#undef ASSERT
#endif
#ifdef SXG_ASSERT_ENABLED
#ifndef ASSERT
#define ASSERT(a) \
{ \
if (!(a)) { \
DBG_ERROR("ASSERT() Failure: file %s, function %s line %d\n",\
__FILE__, __FUNCTION__, __LINE__); \
} \
}
#endif
#else
#ifndef ASSERT
#define ASSERT(a)
#endif
#endif /* SXG_ASSERT_ENABLED */
#ifdef ATKDBG
/*
* Global for timer granularity; every driver must have an instance
* of this initialized to 0
*/
extern ulong ATKTimerDiv;
/*
* trace_entry_t -
*
* This structure defines an entry in the trace buffer. The
* first few fields mean the same thing from entry to entry, while
* the meaning of the last several fields changes to suit the
* needs of the trace entry. Typically they are function call
* parameters.
*/
typedef struct _trace_entry_s {
char name[8]; /* 8 character name - like 's'i'm'b'a'r'c'v' */
u32 time; /* Current clock tic */
unsigned char cpu; /* Current CPU */
unsigned char irql; /* Current IRQL */
unsigned char driver; /* The driver which added the trace call */
unsigned char pad2; /* pad to 4 byte boundary - will probably get used */
u32 arg1; /* Caller arg1 */
u32 arg2; /* Caller arg2 */
u32 arg3; /* Caller arg3 */
u32 arg4; /* Caller arg4 */
} trace_entry_t, *ptrace_entry_t;
/*
* Driver types for driver field in trace_entry_t
*/
#define TRACE_SXG 1
#define TRACE_VPCI 2
#define TRACE_SLIC 3
#define TRACE_ENTRIES 1024
typedef struct _sxg_trace_buffer_t
{
unsigned int size; /* aid for windbg extension */
unsigned int in; /* Where to add */
unsigned int level; /* Current Trace level */
spinlock_t lock; /* For MP tracing */
trace_entry_t entries[TRACE_ENTRIES];/* The circular buffer */
} sxg_trace_buffer_t;
/*
* The trace levels
*
* XXX At the moment I am only defining critical, important, and noisy.
* I am leaving room for more if anyone wants them.
*/
#define TRACE_NONE 0 /* For trace level - if no tracing wanted */
#define TRACE_CRITICAL 1 /* minimal tracing - only critical stuff */
#define TRACE_IMPORTANT 5 /* more tracing - anything important */
#define TRACE_NOISY 10 /* Everything in the world */
/**********************************************************************
*
* The macros themselves -
*
*********************************************************************/
#if ATK_TRACE_ENABLED
#define SXG_TRACE_INIT(buffer, tlevel) \
{ \
memset((buffer), 0, sizeof(sxg_trace_buffer_t)); \
(buffer)->level = (tlevel); \
(buffer)->size = TRACE_ENTRIES; \
spin_lock_init(&(buffer)->lock); \
}
#else
#define SXG_TRACE_INIT(buffer, tlevel)
#endif
/*
* The trace macro. This is active only if ATK_TRACE_ENABLED is set.
*/
#if ATK_TRACE_ENABLED
#define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4) { \
if ((buffer) && ((buffer)->level >= (tlevel))) { \
unsigned int trace_irql = 0; /* ?????? FIX THIS */ \
unsigned int trace_len; \
ptrace_entry_t trace_entry; \
struct timeval timev; \
\
spin_lock(&(buffer)->lock); \
trace_entry = &(buffer)->entries[(buffer)->in]; \
do_gettimeofday(&timev); \
\
memset(trace_entry->name, 0, 8); \
trace_len = strlen(tname); \
trace_len = trace_len > 8 ? 8 : trace_len; \
memcpy(trace_entry->name, (tname), trace_len); \
trace_entry->time = timev.tv_usec; \
trace_entry->cpu = (unsigned char)(smp_processor_id() & 0xFF); \
trace_entry->driver = (tdriver); \
trace_entry->irql = trace_irql; \
trace_entry->arg1 = (ulong)(a1); \
trace_entry->arg2 = (ulong)(a2); \
trace_entry->arg3 = (ulong)(a3); \
trace_entry->arg4 = (ulong)(a4); \
\
(buffer)->in++; \
if ((buffer)->in == TRACE_ENTRIES) \
(buffer)->in = 0; \
\
spin_unlock(&(buffer)->lock); \
} \
}
#else
#define SXG_TRACE(tdriver, buffer, tlevel, tname, a1, a2, a3, a4)
#endif
#endif
#endif /* _SXG_DEBUG_H_ */
/*
* Copyright 1997-2007 Alacritech, Inc. All rights reserved
*
* $Id: sxghif.h,v 1.5 2008/07/24 19:18:22 chris Exp $
*
* sxghif.h:
*
* This file contains structures and definitions for the
* Alacritech Sahara host interface
*/
/*******************************************************************************
* UCODE Registers
*******************************************************************************/
typedef struct _SXG_UCODE_REGS {
// Address 0 - 0x3F = Command codes 0-15 for TCB 0. Excode 0
u32 Icr; // Code = 0 (extended), ExCode = 0 - Int control
u32 RsvdReg1; // Code = 1 - TOE -NA
u32 RsvdReg2; // Code = 2 - TOE -NA
u32 RsvdReg3; // Code = 3 - TOE -NA
u32 RsvdReg4; // Code = 4 - TOE -NA
u32 RsvdReg5; // Code = 5 - TOE -NA
u32 CardUp; // Code = 6 - Microcode initialized when 1
u32 RsvdReg7; // Code = 7 - TOE -NA
u32 CodeNotUsed[8]; // Codes 8-15 not used. ExCode = 0
// This brings us to ExCode 1 at address 0x40 = Interrupt status pointer
u32 Isp; // Code = 0 (extended), ExCode = 1
u32 PadEx1[15]; // Codes 1-15 not used with extended codes
// ExCode 2 = Interrupt Status Register
u32 Isr; // Code = 0 (extended), ExCode = 2
u32 PadEx2[15];
// ExCode 3 = Event base register. Location of event rings
u32 EventBase; // Code = 0 (extended), ExCode = 3
u32 PadEx3[15];
// ExCode 4 = Event ring size
u32 EventSize; // Code = 0 (extended), ExCode = 4
u32 PadEx4[15];
// ExCode 5 = TCB Buffers base address
u32 TcbBase; // Code = 0 (extended), ExCode = 5
u32 PadEx5[15];
// ExCode 6 = TCB Composite Buffers base address
u32 TcbCompBase; // Code = 0 (extended), ExCode = 6
u32 PadEx6[15];
// ExCode 7 = Transmit ring base address
u32 XmtBase; // Code = 0 (extended), ExCode = 7
u32 PadEx7[15];
// ExCode 8 = Transmit ring size
u32 XmtSize; // Code = 0 (extended), ExCode = 8
u32 PadEx8[15];
// ExCode 9 = Receive ring base address
u32 RcvBase; // Code = 0 (extended), ExCode = 9
u32 PadEx9[15];
// ExCode 10 = Receive ring size
u32 RcvSize; // Code = 0 (extended), ExCode = 10
u32 PadEx10[15];
// ExCode 11 = Read EEPROM Config
u32 Config; // Code = 0 (extended), ExCode = 11
u32 PadEx11[15];
// ExCode 12 = Multicast bits 31:0
u32 McastLow; // Code = 0 (extended), ExCode = 12
u32 PadEx12[15];
// ExCode 13 = Multicast bits 63:32
u32 McastHigh; // Code = 0 (extended), ExCode = 13
u32 PadEx13[15];
// ExCode 14 = Ping
u32 Ping; // Code = 0 (extended), ExCode = 14
u32 PadEx14[15];
// ExCode 15 = Link MTU
u32 LinkMtu; // Code = 0 (extended), ExCode = 15
u32 PadEx15[15];
// ExCode 16 = Download synchronization
u32 LoadSync; // Code = 0 (extended), ExCode = 16
u32 PadEx16[15];
// ExCode 17 = Upper DRAM address bits on 32-bit systems
u32 Upper; // Code = 0 (extended), ExCode = 17
u32 PadEx17[15];
// ExCode 18 = Slowpath Send Index Address
u32 SPSendIndex; // Code = 0 (extended), ExCode = 18
u32 PadEx18[15];
u32 RsvdXF; // Code = 0 (extended), ExCode = 19
u32 PadEx19[15];
// ExCode 20 = Aggregation
u32 Aggregation; // Code = 0 (extended), ExCode = 20
u32 PadEx20[15];
// ExCode 21 = Receive MDL push timer
u32 PushTicks; // Code = 0 (extended), ExCode = 21
u32 PadEx21[15];
// ExCode 22 = TOE NA
u32 AckFrequency; // Code = 0 (extended), ExCode = 22
u32 PadEx22[15];
// ExCode 23 = TOE NA
u32 RsvdReg23;
u32 PadEx23[15];
// ExCode 24 = TOE NA
u32 RsvdReg24;
u32 PadEx24[15];
// ExCode 25 = TOE NA
u32 RsvdReg25; // Code = 0 (extended), ExCode = 25
u32 PadEx25[15];
// ExCode 26 = Receive checksum requirements
u32 ReceiveChecksum; // Code = 0 (extended), ExCode = 26
u32 PadEx26[15];
// ExCode 27 = RSS Requirements
u32 Rss; // Code = 0 (extended), ExCode = 27
u32 PadEx27[15];
// ExCode 28 = RSS Table
u32 RssTable; // Code = 0 (extended), ExCode = 28
u32 PadEx28[15];
// ExCode 29 = Event ring release entries
u32 EventRelease; // Code = 0 (extended), ExCode = 29
u32 PadEx29[15];
// ExCode 30 = Number of receive bufferlist commands on ring 0
u32 RcvCmd; // Code = 0 (extended), ExCode = 30
u32 PadEx30[15];
// ExCode 31 = slowpath transmit command - Data[31:0] = 1
u32 XmtCmd; // Code = 0 (extended), ExCode = 31
u32 PadEx31[15];
// ExCode 32 = Dump command
u32 DumpCmd; // Code = 0 (extended), ExCode = 32
u32 PadEx32[15];
// ExCode 33 = Debug command
u32 DebugCmd; // Code = 0 (extended), ExCode = 33
u32 PadEx33[15];
// There are 128 possible extended commands - each of which accounts for 16
// words (including the non-relevant base command codes 1-15).
// Pad for the remainder of these here to bring us to the next CPU
// base. As extended codes are added, reduce the first array value in
// the following field
u32 PadToNextCpu[94][16]; // 94 = 128 - 34 (34 = Excodes 0 - 33)
} SXG_UCODE_REGS, *PSXG_UCODE_REGS;
// Interrupt control register (0) values
#define SXG_ICR_DISABLE 0x00000000
#define SXG_ICR_ENABLE 0x00000001
#define SXG_ICR_MASK 0x00000002
#define SXG_ICR_MSGID_MASK 0xFFFF0000
#define SXG_ICR_MSGID_SHIFT 16
#define SXG_ICR(_MessageId, _Data) \
((((_MessageId) << SXG_ICR_MSGID_SHIFT) & \
SXG_ICR_MSGID_MASK) | (_Data))
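// Illustrative use of SXG_ICR - a sketch; the register field chosen and
// the WRITE_REG/FLUSH combination are assumptions for illustration, not
// lifted from the driver:
//
//	// Enable interrupts for message id 0 on ucode register set 0
//	WRITE_REG(adapter->UcodeRegs[0].Icr, SXG_ICR(0, SXG_ICR_ENABLE), FLUSH);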
// The Microcode supports up to 16 RSS queues
#define SXG_MAX_RSS 16
#define SXG_MAX_RSS_TABLE_SIZE 256 // 256-byte max
#define SXG_RSS_TCP6 0x00000001 // RSS TCP over IPv6
#define SXG_RSS_TCP4 0x00000002 // RSS TCP over IPv4
#define SXG_RSS_LEGACY 0x00000004 // Line-base interrupts
#define SXG_RSS_TABLE_SIZE 0x0000FF00 // Table size mask
#define SXG_RSS_TABLE_SHIFT 8
#define SXG_RSS_BASE_CPU 0x00FF0000 // Base CPU (not used)
#define SXG_RSS_BASE_SHIFT 16
#define SXG_RCV_IP_CSUM_ENABLED 0x00000001 // ExCode 26 (ReceiveChecksum)
#define SXG_RCV_TCP_CSUM_ENABLED 0x00000002 // ExCode 26 (ReceiveChecksum)
#define SXG_XMT_CPUID_SHIFT 16
#if VPCI
#define SXG_CHECK_FOR_HANG_TIME 3000
#else
#define SXG_CHECK_FOR_HANG_TIME 5
#endif
/*
* TCB registers - This is really the same register memory area as UCODE_REGS
* above, but defined differently. Bits 17:06 of the address define the TCB,
* which means each TCB area occupies 0x40 (64) bytes, or 16 u32s. What really
* is happening is that these registers occupy the "PadEx[15]" areas in the
* SXG_UCODE_REGS definition above
*/
typedef struct _SXG_TCB_REGS {
u32 ExCode; /* Extended codes - see SXG_UCODE_REGS */
u32 Xmt; /* Code = 1 - # of Xmt descriptors added to ring */
u32 Rcv; /* Code = 2 - # of Rcv descriptors added to ring */
u32 Rsvd1; /* Code = 3 - TOE NA */
u32 Rsvd2; /* Code = 4 - TOE NA */
u32 Rsvd3; /* Code = 5 - TOE NA */
u32 Invalid; /* Code = 6 - Reserved for "CardUp" see above */
u32 Rsvd4; /* Code = 7 - TOE NA */
u32 Rsvd5; /* Code = 8 - TOE NA */
u32 Pad[7]; /* Codes 8-15 - Not used. */
} SXG_TCB_REGS, *PSXG_TCB_REGS;
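/*
 * Since bits 17:06 of the register address select the TCB and each TCB
 * area is 0x40 (64) bytes, the TCB register block for TCB n starts at
 * byte offset (n << 6) within this register space. A sketch (assumption
 * for illustration only):
 *
 *	PSXG_TCB_REGS TcbRegs = (PSXG_TCB_REGS)
 *		((char *)adapter->UcodeRegs + ((u32)TcbId << 6));
 */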
/***************************************************************************
* ISR Format
* 31 0
* _______________________________________
* | | | | | | | | |
* |____|____|____|____|____|____|____|____|
* ^^^^ ^^^^ ^^^^ ^^^^ \ /
* ERR --|||| |||| |||| |||| -----------------
* EVENT ---||| |||| |||| |||| |
* ----|| |||| |||| |||| |-- Crash Address
* UPC -----| |||| |||| ||||
* LEVENT -------|||| |||| ||||
* PDQF --------||| |||| ||||
* RMISS ---------|| |||| ||||
* BREAK ----------| |||| ||||
* HBEATOK ------------|||| ||||
* NOHBEAT -------------||| ||||
* ERFULL --------------|| ||||
* XDROP ---------------| ||||
* -----------------||||
* -----------------||||--\
* ||---|-CpuId of crash
* |----/
***************************************************************************/
#define SXG_ISR_ERR 0x80000000 // Error
#define SXG_ISR_EVENT 0x40000000 // Event ring event
#define SXG_ISR_NONE1 0x20000000 // Not used
#define SXG_ISR_UPC 0x10000000 // Dump/debug command complete
#define SXG_ISR_LINK 0x08000000 // Link event
#define SXG_ISR_PDQF 0x04000000 // Processed data queue full
#define SXG_ISR_RMISS 0x02000000 // Drop - no host buf
#define SXG_ISR_BREAK 0x01000000 // Breakpoint hit
#define SXG_ISR_PING 0x00800000 // Heartbeat response
#define SXG_ISR_DEAD 0x00400000 // Card crash
#define SXG_ISR_ERFULL 0x00200000 // Event ring full
#define SXG_ISR_XDROP 0x00100000 // XMT Drop - no DRAM bufs or XMT err
#define SXG_ISR_SPSEND 0x00080000 // Slow send complete
#define SXG_ISR_CPU 0x00070000 // Dead CPU mask
#define SXG_ISR_CPU_SHIFT 16 // Dead CPU shift
#define SXG_ISR_CRASH 0x0000FFFF // Crash address mask
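// Illustrative decode of an ISR value using the bits above - a sketch;
// sxg_process_event_ring() is a hypothetical helper name:
//
//	if (Isr & SXG_ISR_DEAD) {
//		u32 DeadCpu = (Isr & SXG_ISR_CPU) >> SXG_ISR_CPU_SHIFT;
//		u32 CrashPc = Isr & SXG_ISR_CRASH;
//		DBG_ERROR("sxg: ucode crash - cpu %u pc 0x%x\n", DeadCpu, CrashPc);
//	}
//	if (Isr & SXG_ISR_EVENT)
//		sxg_process_event_ring(adapter);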
/***************************************************************************
*
* Event Ring entry
*
***************************************************************************/
/*
* 31 15 0
* .___________________.___________________.
* |<------------ Pad 0 ------------>|
* |_________|_________|_________|_________|0 0x00
* |<------------ Pad 1 ------------>|
* |_________|_________|_________|_________|4 0x04
* |<------------ Pad 2 ------------>|
* |_________|_________|_________|_________|8 0x08
* |<----------- Event Word 0 ------------>|
* |_________|_________|_________|_________|12 0x0c
* |<----------- Event Word 1 ------------>|
* |_________|_________|_________|_________|16 0x10
* |<------------- Toeplitz ------------>|
* |_________|_________|_________|_________|20 0x14
* |<----- Length ---->|<------ TCB Id --->|
* |_________|_________|_________|_________|24 0x18
* |<----- Status ---->|Evnt Code|Flsh Code|
* |_________|_________|_________|_________|28 0x1c
* ^ ^^^^ ^^^^
* |- VALID |||| ||||- RBUFC
* |||| |||-- SLOWR
* |||| ||--- UNUSED
* |||| |---- FASTC
* ||||------ FASTR
* |||-------
* ||--------
* |---------
*
* Slowpath status:
* _______________________________________
* |<----- Status ---->|Evnt Code|Flsh Code|
* |_________|Cmd Index|_________|_________|28 0x1c
* ^^^ ^^^^
* ||| ||||- ISTCPIP6
* ||| |||-- IPONLY
* ||| ||--- RCVERR
* ||| |---- IPCBAD
* |||------ TCPCBAD
* ||------- ISTCPIP
* |-------- SCERR
*
*/
#pragma pack(push, 1)
typedef struct _SXG_EVENT {
u32 Pad[1]; // not used
u32 SndUna; // SndUna value
u32 Resid; // receive MDL resid
union {
void * HostHandle; // Receive host handle
u32 Rsvd1; // TOE NA
struct {
u32 NotUsed;
u32 Rsvd2; // TOE NA
} Flush;
};
u32 Toeplitz; // RSS Toeplitz hash
union {
ushort Rsvd3; // TOE NA
ushort HdrOffset; // Slowpath
};
ushort Length; //
unsigned char Rsvd4; // TOE NA
unsigned char Code; // Event code
unsigned char CommandIndex; // New ring index
unsigned char Status; // Event status
} SXG_EVENT, *PSXG_EVENT;
#pragma pack(pop)
// Event code definitions
#define EVENT_CODE_BUFFERS 0x01 // Receive buffer list command (ring 0)
#define EVENT_CODE_SLOWRCV 0x02 // Slowpath receive
#define EVENT_CODE_UNUSED 0x04 // Was slowpath commands complete
// Status values
#define EVENT_STATUS_VALID 0x80 // Entry valid
// Slowpath status
#define EVENT_STATUS_ERROR 0x40 // Completed with error. Index in next byte
#define EVENT_STATUS_TCPIP4 0x20 // TCPIPv4 frame
#define EVENT_STATUS_TCPBAD 0x10 // Bad TCP checksum
#define EVENT_STATUS_IPBAD 0x08 // Bad IP checksum
#define EVENT_STATUS_RCVERR 0x04 // Slowpath receive error
#define EVENT_STATUS_IPONLY 0x02 // IP frame
#define EVENT_STATUS_TCPIP6 0x01 // TCPIPv6 frame
#define EVENT_STATUS_TCPIP 0x21 // Combination of v4 and v6
// Event ring
// Size must be power of 2, between 128 and 16k
#define EVENT_RING_SIZE 4096 // ??
#define EVENT_RING_BATCH 16 // Hand entries back 16 at a time.
#define EVENT_BATCH_LIMIT 256 // Stop processing events after 256 (16 * 16)
typedef struct _SXG_EVENT_RING {
SXG_EVENT Ring[EVENT_RING_SIZE];
}SXG_EVENT_RING, *PSXG_EVENT_RING;
/***************************************************************************
*
* TCB Buffers
*
***************************************************************************/
// Maximum number of TCBS supported by hardware/microcode
#define SXG_MAX_TCB 4096
// Minimum TCBs before we fail initialization
#define SXG_MIN_TCB 512
// TCB Hash
// The bucket is determined by bits 11:4 of the toeplitz if we support 4k
// offloaded connections, 10:4 if we support 2k and so on.
#define SXG_TCB_BUCKET_SHIFT 4
#define SXG_TCB_PER_BUCKET 16
#define SXG_TCB_BUCKET_MASK 0xFF0 // Bucket portion of TCB ID
#define SXG_TCB_ELEMENT_MASK 0x00F // Element within bucket
#define SXG_TCB_BUCKETS 256 // 256 * 16 = 4k
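// Illustrative bucket/element split of a Toeplitz hash in the 4k-TCB
// configuration - a sketch based solely on the masks above:
//
//	u32 TcbId   = Toeplitz & (SXG_TCB_BUCKET_MASK | SXG_TCB_ELEMENT_MASK);
//	u32 Bucket  = (TcbId & SXG_TCB_BUCKET_MASK) >> SXG_TCB_BUCKET_SHIFT;
//	u32 Element = TcbId & SXG_TCB_ELEMENT_MASK;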
#define SXG_TCB_BUFFER_SIZE 512 // ASSERT format is correct
#define SXG_TCB_RCVQ_SIZE 736
#define SXG_TCB_COMPOSITE_BUFFER_SIZE 1024
#define SXG_LOCATE_TCP_FRAME_HDR(_TcpObject, _IPv6) \
(((_TcpObject)->VlanId) ? \
((_IPv6) ? /* Vlan frame header = yes */ \
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.SxgTcp : \
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.SxgTcp) : \
((_IPv6) ? /* Vlan frame header = No */ \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.SxgTcp : \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.SxgTcp))
#define SXG_LOCATE_IP_FRAME_HDR(_TcpObject) \
(_TcpObject)->VlanId ? \
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp.Ip : \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp.Ip
#define SXG_LOCATE_IP6_FRAME_HDR(_TcpObject) \
(_TcpObject)->VlanId ? \
&(_TcpObject)->CompBuffer->Frame.HasVlan.TcpIp6.Ip : \
&(_TcpObject)->CompBuffer->Frame.NoVlan.TcpIp6.Ip
#if DBG
// Horrible kludge to distinguish dumb-nic, slowpath, and
// fastpath traffic. Decrement the HopLimit by one
// for slowpath, two for fastpath. This assumes the limit is measurably
// greater than two, which I think is reasonable.
// Obviously this is DBG only. Maybe remove later, or #if 0 so we
// can set it when needed
#define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath) { \
PIPV6_HDR _Ip6FrameHdr; \
if((_TcpObject)->IPv6) { \
_Ip6FrameHdr = SXG_LOCATE_IP6_FRAME_HDR((_TcpObject)); \
if(_FastPath) { \
_Ip6FrameHdr->HopLimit = (_TcpObject)->Cached.TtlOrHopLimit - 2; \
} else { \
_Ip6FrameHdr->HopLimit = (_TcpObject)->Cached.TtlOrHopLimit - 1; \
} \
} \
}
#else
// Do nothing with free build
#define SXG_DBG_HOP_LIMIT(_TcpObject, _FastPath)
#endif
/***************************************************************************
* Receive and transmit rings
***************************************************************************/
#define SXG_MAX_RING_SIZE 256
#define SXG_XMT_RING_SIZE 128 // Start with 128
#define SXG_RCV_RING_SIZE 128 // Start with 128
#define SXG_MAX_ENTRIES 4096
// Structure and macros to manage a ring
typedef struct _SXG_RING_INFO {
unsigned char Head; // Where we add entries - Note unsigned char:RING_SIZE
unsigned char Tail; // Where we pull off completed entries
ushort Size; // Ring size - Must be multiple of 2
void * Context[SXG_MAX_RING_SIZE]; // Shadow ring
} SXG_RING_INFO, *PSXG_RING_INFO;
#define SXG_INITIALIZE_RING(_ring, _size) { \
(_ring).Head = 0; \
(_ring).Tail = 0; \
(_ring).Size = (_size); \
}
#define SXG_ADVANCE_INDEX(_index, _size) ((_index) = ((_index) + 1) & ((_size) - 1))
#define SXG_PREVIOUS_INDEX(_index, _size) (((_index) - 1) &((_size) - 1))
#define SXG_RING_EMPTY(_ring) ((_ring)->Head == (_ring)->Tail)
#define SXG_RING_FULL(_ring) ((((_ring)->Head + 1) & ((_ring)->Size - 1)) == (_ring)->Tail)
#define SXG_RING_ADVANCE_HEAD(_ring) SXG_ADVANCE_INDEX((_ring)->Head, ((_ring)->Size))
#define SXG_RING_RETREAT_HEAD(_ring) ((_ring)->Head = \
SXG_PREVIOUS_INDEX((_ring)->Head, (_ring)->Size))
#define SXG_RING_ADVANCE_TAIL(_ring) { \
ASSERT((_ring)->Tail != (_ring)->Head); \
SXG_ADVANCE_INDEX((_ring)->Tail, ((_ring)->Size)); \
}
// Set cmd to the next available ring entry, set the shadow context
// entry and advance the ring.
// The appropriate lock must be held when calling this macro
#define SXG_GET_CMD(_ring, _ringinfo, _cmd, _context) { \
if(SXG_RING_FULL(_ringinfo)) { \
(_cmd) = NULL; \
} else { \
(_cmd) = &(_ring)->Descriptors[(_ringinfo)->Head]; \
(_ringinfo)->Context[(_ringinfo)->Head] = (void *)(_context);\
SXG_RING_ADVANCE_HEAD(_ringinfo); \
} \
}
// Abort the previously allocated command by retreating the head.
// NOTE - The appropriate lock MUST NOT BE DROPPED between the SXG_GET_CMD
// and SXG_ABORT_CMD calls.
#define SXG_ABORT_CMD(_ringinfo) { \
ASSERT(!(SXG_RING_EMPTY(_ringinfo))); \
SXG_RING_RETREAT_HEAD(_ringinfo); \
(_ringinfo)->Context[(_ringinfo)->Head] = NULL; \
}
// For the given ring, return a pointer to the tail cmd and context,
// clear the context and advance the tail
#define SXG_RETURN_CMD(_ring, _ringinfo, _cmd, _context) { \
(_cmd) = &(_ring)->Descriptors[(_ringinfo)->Tail]; \
(_context) = (_ringinfo)->Context[(_ringinfo)->Tail]; \
(_ringinfo)->Context[(_ringinfo)->Tail] = NULL; \
SXG_RING_ADVANCE_TAIL(_ringinfo); \
}
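// Illustrative producer/consumer pattern for the ring macros above - a
// sketch; the locking shown and the variable names are assumptions:
//
//	// Producer (XmtZeroLock held): reserve an entry and fill it in
//	SXG_GET_CMD(adapter->XmtRings, &adapter->XmtRingZeroInfo, XmtCmd, SxgSgl);
//	if (XmtCmd == NULL)
//		return;			// ring full - back off
//	/* ... fill in *XmtCmd, then kick the card via the XmtCmd register ... */
//
//	// Consumer (completion path, same lock held): retire the tail entry
//	SXG_RETURN_CMD(adapter->XmtRings, &adapter->XmtRingZeroInfo, XmtCmd, SxgSgl);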
/***************************************************************************
*
* Host Command Buffer - commands to INIC via the Cmd Rings
*
***************************************************************************/
/*
* 31 15 0
* .___________________.___________________.
* |<-------------- Sgl Low -------------->|
* |_________|_________|_________|_________|0 0x00
* |<-------------- Sgl High ------------->|
* |_________|_________|_________|_________|4 0x04
* |<------------- Sge 0 Low ----------->|
* |_________|_________|_________|_________|8 0x08
* |<------------- Sge 0 High ----------->|
* |_________|_________|_________|_________|12 0x0c
* |<------------ Sge 0 Length ---------->|
* |_________|_________|_________|_________|16 0x10
* |<----------- Window Update ----------->|
* |<-------- SP 1st SGE offset ---------->|
* |_________|_________|_________|_________|20 0x14
* |<----------- Total Length ------------>|
* |_________|_________|_________|_________|24 0x18
* |<----- LCnt ------>|<----- Flags ----->|
* |_________|_________|_________|_________|28 0x1c
*/
#pragma pack(push, 1)
typedef struct _SXG_CMD {
dma_addr_t Sgl; // Physical address of SGL
union {
struct {
dma64_addr_t FirstSgeAddress;// Address of first SGE
u32 FirstSgeLength; // Length of first SGE
union {
u32 Rsvd1; // TOE NA
u32 SgeOffset; // Slowpath - 2nd SGE offset
u32 Resid; // MDL completion - clobbers update
};
union {
u32 TotalLength; // Total transfer length
u32 Mss; // LSO MSS
};
} Buffer;
};
union {
struct {
unsigned char Flags:4; // slowpath flags
unsigned char IpHl:4; // Ip header length (>>2)
unsigned char MacLen; // Mac header len
} CsumFlags;
struct {
ushort Flags:4; // slowpath flags
ushort TcpHdrOff:7; // TCP
ushort MacLen:5; // Mac header len
} LsoFlags;
ushort Flags; // flags
};
union {
ushort SgEntries; // SG entry count including first sge
struct {
unsigned char Status; // Copied from event status
unsigned char NotUsed;
} Status;
};
} SXG_CMD, *PSXG_CMD;
#pragma pack(pop)
#pragma pack(push, 1)
typedef struct _VLAN_HDR {
ushort VlanTci;
ushort VlanTpid;
} VLAN_HDR, *PVLAN_HDR;
#pragma pack(pop)
/*
* Slowpath Flags:
*
*
* LSO Flags:
* .---
* /.--- TCP Large segment send
* //.---
* ///.---
* 3 1 1 ////
* 1 5 0 ||||
* .___________________.____________vvvv.
* | |MAC | TCP | |
* | LCnt |hlen|hdroff|Flgs|
* |___________________|||||||||||||____|
*
*
* Checksum Flags
*
* .---
* /.---
* //.--- Checksum TCP
* ///.--- Checksum IP
* 3 1 //// No bits - normal send
* 1 5 7 ||||
* .___________________._______________vvvv.
* | | Offload | IP | |
* | LCnt |MAC hlen |Hlen|Flgs|
* |___________________|____|____|____|____|
*
*/
// Slowpath CMD flags
#define SXG_SLOWCMD_CSUM_IP 0x01 // Checksum IP
#define SXG_SLOWCMD_CSUM_TCP 0x02 // Checksum TCP
#define SXG_SLOWCMD_LSO 0x04 // Large segment send
typedef struct _SXG_XMT_RING {
SXG_CMD Descriptors[SXG_XMT_RING_SIZE];
} SXG_XMT_RING, *PSXG_XMT_RING;
typedef struct _SXG_RCV_RING {
SXG_CMD Descriptors[SXG_RCV_RING_SIZE];
} SXG_RCV_RING, *PSXG_RCV_RING;
/***************************************************************************
* Share memory buffer types - Used to identify asynchronous
* shared memory allocation
***************************************************************************/
typedef enum {
SXG_BUFFER_TYPE_RCV, // Receive buffer
SXG_BUFFER_TYPE_SGL // SGL buffer
} SXG_BUFFER_TYPE;
// State for SXG buffers
#define SXG_BUFFER_FREE 0x01
#define SXG_BUFFER_BUSY 0x02
#define SXG_BUFFER_ONCARD 0x04
#define SXG_BUFFER_UPSTREAM 0x08
/***************************************************************************
* Receive data buffers
*
* Receive data buffers are given to the Sahara card 128 at a time.
* This is accomplished by filling in a "receive descriptor block"
* with 128 "receive descriptors". Each descriptor consists of
* a physical address, which the card uses as the address to
* DMA data into, and a virtual address, which is given back
* to the host in the "HostHandle" portion of an event.
* The receive descriptor data structure is defined below
* as SXG_RCV_DATA_DESCRIPTOR, and the corresponding block
* is defined as SXG_RCV_DESCRIPTOR_BLOCK.
*
* This receive descriptor block is given to the card by filling
* in the Sgl field of a SXG_CMD entry from pAdapt->RcvRings[0]
* with the physical address of the receive descriptor block.
*
* Both the receive buffers and the receive descriptor blocks
* require additional data structures to maintain them
* on a free queue and contain other information associated with them.
* Those data structures are defined as the SXG_RCV_DATA_BUFFER_HDR
* and SXG_RCV_DESCRIPTOR_BLOCK_HDR respectively.
*
* Since both the receive buffers and the receive descriptor block
* must be accessible by the card, both must be allocated out of
* shared memory. To ensure that we always have a descriptor
* block available for every 128 buffers, we allocate all of
* these resources together in a single block. This entire
* block is managed by a SXG_RCV_BLOCK_HDR, whose sole purpose
* is to maintain address information so that the entire block
* can be freed later.
*
* Further complicating matters is the fact that the receive
* buffers must be variable in length in order to accommodate
* jumbo frame configurations. We configure the buffer
* length so that the buffer and its corresponding SXG_RCV_DATA_BUFFER_HDR
* structure add up to an even boundary. Then we place the
* remaining data structures after 128 of them as shown in
* the following diagram:
*
* _________________________________________
* | |
* | Variable length receive buffer #1 |
* |_________________________________________|
* | |
* | SXG_RCV_DATA_BUFFER_HDR #1 |
* |_________________________________________| <== Even 2k or 10k boundary
* | |
* | ... repeat 2-128 .. |
* |_________________________________________|
* | |
* | SXG_RCV_DESCRIPTOR_BLOCK |
* | Contains SXG_RCV_DATA_DESCRIPTOR * 128 |
* |_________________________________________|
* | |
* | SXG_RCV_DESCRIPTOR_BLOCK_HDR |
* |_________________________________________|
* | |
* | SXG_RCV_BLOCK_HDR |
* |_________________________________________|
*
* Memory consumption:
* Non-jumbo:
* Buffers and SXG_RCV_DATA_BUFFER_HDR = 2k * 128 = 256k
* + SXG_RCV_DESCRIPTOR_BLOCK = 2k
* + SXG_RCV_DESCRIPTOR_BLOCK_HDR = ~32
* + SXG_RCV_BLOCK_HDR = ~32
* => Total = ~258k/block
*
* Jumbo:
* Buffers and SXG_RCV_DATA_BUFFER_HDR = 10k * 128 = 1280k
* + SXG_RCV_DESCRIPTOR_BLOCK = 2k
* + SXG_RCV_DESCRIPTOR_BLOCK_HDR = ~32
* + SXG_RCV_BLOCK_HDR = ~32
* => Total = ~1282k/block
*
***************************************************************************/
#define SXG_RCV_DATA_BUFFERS 4096 // Amount to give to the card
#define SXG_INITIAL_RCV_DATA_BUFFERS 8192 // Initial pool of buffers
#define SXG_MIN_RCV_DATA_BUFFERS 2048 // Minimum amount and when to get more
#define SXG_MAX_RCV_BLOCKS 128 // = 16384 receive buffers
// Receive buffer header
typedef struct _SXG_RCV_DATA_BUFFER_HDR {
dma_addr_t PhysicalAddress; // Buffer physical address
// Note - DO NOT USE the VirtualAddress field to locate data.
// Use the sxg.h:SXG_RECEIVE_DATA_LOCATION macro instead.
void *VirtualAddress; // Start of buffer
LIST_ENTRY FreeList; // Free queue of buffers
struct _SXG_RCV_DATA_BUFFER_HDR *Next; // Fastpath data buffer queue
u32 Size; // Buffer size
u32 ByteOffset; // See SXG_RESTORE_MDL_OFFSET
unsigned char State; // See SXG_BUFFER state above
unsigned char Status; // Event status (to log PUSH)
struct sk_buff * skb; // Double mapped (nbl and pkt)
} SXG_RCV_DATA_BUFFER_HDR, *PSXG_RCV_DATA_BUFFER_HDR;
// SxgSlowReceive uses the PACKET (skb) contained
// in the SXG_RCV_DATA_BUFFER_HDR when indicating dumb-nic data
#define SxgDumbRcvPacket skb
#define SXG_RCV_DATA_HDR_SIZE 256 // Space for SXG_RCV_DATA_BUFFER_HDR
#define SXG_RCV_DATA_BUFFER_SIZE 2048 // Non jumbo = 2k including HDR
#define SXG_RCV_JUMBO_BUFFER_SIZE 10240 // jumbo = 10k including HDR
// Receive data descriptor
typedef struct _SXG_RCV_DATA_DESCRIPTOR {
union {
struct sk_buff * VirtualAddress; // Host handle
u64 ForceTo8Bytes; // Force x86 to 8-byte boundary
};
dma_addr_t PhysicalAddress;
} SXG_RCV_DATA_DESCRIPTOR, *PSXG_RCV_DATA_DESCRIPTOR;
// Receive descriptor block
#define SXG_RCV_DESCRIPTORS_PER_BLOCK 128
#define SXG_RCV_DESCRIPTOR_BLOCK_SIZE 2048 // For sanity check
typedef struct _SXG_RCV_DESCRIPTOR_BLOCK {
SXG_RCV_DATA_DESCRIPTOR Descriptors[SXG_RCV_DESCRIPTORS_PER_BLOCK];
} SXG_RCV_DESCRIPTOR_BLOCK, *PSXG_RCV_DESCRIPTOR_BLOCK;
// Receive descriptor block header
typedef struct _SXG_RCV_DESCRIPTOR_BLOCK_HDR {
void * VirtualAddress; // Start of 2k buffer
dma_addr_t PhysicalAddress; // ..and its physical address
LIST_ENTRY FreeList; // Free queue of descriptor blocks
unsigned char State; // See SXG_BUFFER state above
} SXG_RCV_DESCRIPTOR_BLOCK_HDR, *PSXG_RCV_DESCRIPTOR_BLOCK_HDR;
// Receive block header
typedef struct _SXG_RCV_BLOCK_HDR {
void * VirtualAddress; // Start of virtual memory
dma_addr_t PhysicalAddress; // ..and its physical address
LIST_ENTRY AllList; // Queue of all SXG_RCV_BLOCKS
} SXG_RCV_BLOCK_HDR, *PSXG_RCV_BLOCK_HDR;
// Macros to determine data structure offsets into receive block
#define SXG_RCV_BLOCK_SIZE(_Buffersize) \
(((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \
(sizeof(SXG_RCV_DESCRIPTOR_BLOCK)) + \
(sizeof(SXG_RCV_DESCRIPTOR_BLOCK_HDR)) + \
(sizeof(SXG_RCV_BLOCK_HDR)))
#define SXG_RCV_BUFFER_DATA_SIZE(_Buffersize) \
((_Buffersize) - SXG_RCV_DATA_HDR_SIZE)
#define SXG_RCV_DATA_BUFFER_HDR_OFFSET(_Buffersize) \
((_Buffersize) - SXG_RCV_DATA_HDR_SIZE)
#define SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(_Buffersize) \
((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK)
#define SXG_RCV_DESCRIPTOR_BLOCK_HDR_OFFSET(_Buffersize) \
(((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \
(sizeof(SXG_RCV_DESCRIPTOR_BLOCK)))
#define SXG_RCV_BLOCK_HDR_OFFSET(_Buffersize) \
(((_Buffersize) * SXG_RCV_DESCRIPTORS_PER_BLOCK) + \
(sizeof(SXG_RCV_DESCRIPTOR_BLOCK)) + \
(sizeof(SXG_RCV_DESCRIPTOR_BLOCK_HDR)))
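// Illustrative carve-up of a freshly allocated receive block using the
// offset macros above - a sketch; the variable names are assumptions:
//
//	u32 BufferSize = SXG_RCV_DATA_BUFFER_SIZE;	// non-jumbo
//	void *Block = ...;	/* SXG_RCV_BLOCK_SIZE(BufferSize) bytes of shared memory */
//
//	// Header for receive buffer i sits at the tail of that buffer
//	PSXG_RCV_DATA_BUFFER_HDR Hdr = (PSXG_RCV_DATA_BUFFER_HDR)
//		((unsigned char *)Block + (i * BufferSize) +
//		 SXG_RCV_DATA_BUFFER_HDR_OFFSET(BufferSize));
//
//	// The descriptor block follows the 128 buffers
//	PSXG_RCV_DESCRIPTOR_BLOCK DescBlock = (PSXG_RCV_DESCRIPTOR_BLOCK)
//		((unsigned char *)Block + SXG_RCV_DESCRIPTOR_BLOCK_OFFSET(BufferSize));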
// Use the miniport reserved portion of the NBL to locate
// our SXG_RCV_DATA_BUFFER_HDR structure.
typedef struct _SXG_RCV_NBL_RESERVED {
PSXG_RCV_DATA_BUFFER_HDR RcvDataBufferHdr;
void * Available;
} SXG_RCV_NBL_RESERVED, *PSXG_RCV_NBL_RESERVED;
#define SXG_RCV_NBL_BUFFER_HDR(_NBL) (((PSXG_RCV_NBL_RESERVED)NET_BUFFER_LIST_MINIPORT_RESERVED(_NBL))->RcvDataBufferHdr)
/***************************************************************************
* Scatter gather list buffer
***************************************************************************/
#define SXG_INITIAL_SGL_BUFFERS 8192 // Initial pool of SGL buffers
#define SXG_MIN_SGL_BUFFERS 2048 // Minimum amount and when to get more
#define SXG_MAX_SGL_BUFFERS 16384 // Maximum to allocate (note ADAPT:ushort)
// Self identifying structure type
typedef enum _SXG_SGL_TYPE {
SXG_SGL_DUMB, // Dumb NIC SGL
SXG_SGL_SLOW, // Slowpath protocol header - see below
SXG_SGL_CHIMNEY // Chimney offload SGL
} SXG_SGL_TYPE, PSXG_SGL_TYPE;
// Note - the description below is Microsoft specific
//
// The following definition specifies the amount of shared memory to allocate
// for the SCATTER_GATHER_LIST portion of the SXG_SCATTER_GATHER data structure.
// The following considerations apply when setting this value:
// - First, the Sahara card is designed to read the Microsoft SGL structure
// straight out of host memory. This means that the SGL must reside in
// shared memory. If the length here is smaller than the SGL for the
// NET_BUFFER, then NDIS will allocate its own buffer. The buffer
// that NDIS allocates is not in shared memory, so when this happens,
// the SGL will need to be copied to a set of SXG_SCATTER_GATHER buffers.
// In other words.. we don't want this value to be too small.
// - On the other hand.. we're allocating up to 16k of these things. If
// we make this too big, we start to consume a ton of memory..
// At the moment, I'm going to limit the number of SG entries to 150.
// If each entry maps roughly 4k, then this should cover roughly 600kB
// NET_BUFFERs. Furthermore, since each entry is 24 bytes, the total
// SGE portion of the structure consumes 3600 bytes, which should allow
// the entire SXG_SCATTER_GATHER structure to reside comfortably within
// a 4k block, providing the remaining fields stay under 500 bytes.
//
// So with 150 entries, the SXG_SCATTER_GATHER structure becomes roughly
// 4k. At 16k of them, that amounts to 64M of shared memory. A ton, but
// manageable.
#define SXG_SGL_ENTRIES 150
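// Sanity arithmetic for the sizing discussion above: each SXG_X64_SGE is
// 8 + 4 + 4 + 8 = 24 bytes, so 150 entries consume 3600 bytes and the whole
// SXG_SCATTER_GATHER fits in a 4k block; at the SXG_MAX_SGL_BUFFERS limit of
// 16384 buffers that is roughly 16384 * 4k = 64MB of shared memory.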
// The ucode expects an NDIS SGL structure that
// is formatted for an x64 system. When running
// on an x64 system, we can simply hand the NDIS SGL
// to the card directly. For x86 systems we must reconstruct
// the SGL. The following structure defines an x64
// formatted SGL entry
typedef struct _SXG_X64_SGE {
dma64_addr_t Address; // same as wdm.h
u32 Length; // same as wdm.h
u32 CompilerPad;// The compiler pads to 8-bytes
u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
} SXG_X64_SGE, *PSXG_X64_SGE;
typedef struct _SCATTER_GATHER_ELEMENT {
dma64_addr_t Address; // same as wdm.h
u32 Length; // same as wdm.h
u32 CompilerPad;// The compiler pads to 8-bytes
u64 Reserved; // u32 * in wdm.h. Force to 8 bytes
} SCATTER_GATHER_ELEMENT, *PSCATTER_GATHER_ELEMENT;
typedef struct _SCATTER_GATHER_LIST {
u32 NumberOfElements;
u32 * Reserved;
SCATTER_GATHER_ELEMENT Elements[];
} SCATTER_GATHER_LIST, *PSCATTER_GATHER_LIST;
// The card doesn't care about anything except elements, so
// we can leave the u32 * reserved field alone in the following
// SGL structure. But redefine from wdm.h:SCATTER_GATHER_LIST so
// we can specify SXG_X64_SGE and define a fixed number of elements
typedef struct _SXG_X64_SGL {
u32 NumberOfElements;
u32 * Reserved;
SXG_X64_SGE Elements[SXG_SGL_ENTRIES];
} SXG_X64_SGL, *PSXG_X64_SGL;
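// Illustrative only - a minimal sketch of how a host SGL might be
// repacked into the x64 format the ucode expects when running on a
// 32-bit host, per the note above. The function name is hypothetical;
// the real driver supplies its own conversion path.
static inline void sxg_repack_sgl(PSCATTER_GATHER_LIST src, PSXG_X64_SGL dst)
{
	u32 i;

	dst->NumberOfElements = src->NumberOfElements;
	for (i = 0; i < src->NumberOfElements && i < SXG_SGL_ENTRIES; i++) {
		dst->Elements[i].Address     = src->Elements[i].Address;
		dst->Elements[i].Length      = src->Elements[i].Length;
		dst->Elements[i].CompilerPad = 0;	// pad/reserved fields zeroed
		dst->Elements[i].Reserved    = 0;
	}
}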
typedef struct _SXG_SCATTER_GATHER {
SXG_SGL_TYPE Type; // FIRST! Dumb-nic or offload
void * adapter; // Back pointer to adapter
LIST_ENTRY FreeList; // Free SXG_SCATTER_GATHER blocks
LIST_ENTRY AllList; // All SXG_SCATTER_GATHER blocks
dma_addr_t PhysicalAddress;// physical address
unsigned char State; // See SXG_BUFFER state above
unsigned char CmdIndex; // Command ring index
struct sk_buff * DumbPacket; // Associated Packet
u32 Direction; // For asynchronous completions
u32 CurOffset; // Current SGL offset
u32 SglRef; // SGL reference count
VLAN_HDR VlanTag; // VLAN tag to be inserted into SGL
PSCATTER_GATHER_LIST pSgl; // SGL Addr. Possibly &Sgl
SXG_X64_SGL Sgl; // SGL handed to card
} SXG_SCATTER_GATHER, *PSXG_SCATTER_GATHER;
#if defined(CONFIG_X86_64)
#define SXG_SGL_BUFFER(_SxgSgl) (&_SxgSgl->Sgl)
#define SXG_SGL_BUF_SIZE sizeof(SXG_X64_SGL)
#elif defined(CONFIG_X86)
// Force NDIS to give us its own buffer so we can reformat to our own
#define SXG_SGL_BUFFER(_SxgSgl) NULL
#define SXG_SGL_BUF_SIZE 0
#else
Stop Compilation;
#endif
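// A hedged compile-time sanity check for the sizing assumptions in the
// comment above (24-byte SGEs, 150 entries = 3600 bytes of SGEs, whole
// structure within a 4k block). Illustration only - not part of the
// driver; BUILD_BUG_ON() comes from <linux/kernel.h>.
static inline void sxg_sgl_size_sanity(void)
{
	BUILD_BUG_ON(sizeof(SXG_X64_SGE) != 24);
	BUILD_BUG_ON(SXG_SGL_ENTRIES * sizeof(SXG_X64_SGE) != 3600);
	BUILD_BUG_ON(sizeof(SXG_SCATTER_GATHER) > 4096);
}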
/*
* Copyright 1997-2007 Alacritech, Inc. All rights reserved
*
* $Id: sxghw.h,v 1.2 2008/07/24 17:24:23 chris Exp $
*
* sxghw.h:
*
* This file contains structures and definitions for the
* Alacritech Sahara hardware
*/
/*******************************************************************************
* Configuration space
*******************************************************************************/
// PCI Vendor ID
#define SXG_VENDOR_ID 0x139A // Alacritech's Vendor ID
// PCI Device ID
#define SXG_DEVICE_ID 0x0009 // Sahara Device ID
//
// Subsystem IDs.
//
// The subsystem ID value is broken into bit fields as follows:
// Bits [15:12] - Function
// Bits [11:8] - OEM and/or operating system.
// Bits [7:0] - Base SID.
//
// SSID field (bit) masks
#define SSID_BASE_MASK 0x00FF // Base subsystem ID mask
#define SSID_OEM_MASK 0x0F00 // Subsystem OEM mask
#define SSID_FUNC_MASK 0xF000 // Subsystem function mask
// Base SSID's
#define SSID_SAHARA_PROTO 0x0018 // 100022 Sahara prototype (XenPak) board
#define SSID_SAHARA_FIBER 0x0019 // 100023 Sahara 1-port fiber board
#define SSID_SAHARA_COPPER 0x001A // 100024 Sahara 1-port copper board
// Useful SSID macros
#define SSID_BASE(ssid) ((ssid) & SSID_BASE_MASK) // isolate base SSID bits
#define SSID_OEM(ssid) ((ssid) & SSID_OEM_MASK) // isolate SSID OEM bits
#define SSID_FUNC(ssid) ((ssid) & SSID_FUNC_MASK) // isolate SSID function bits
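// Example (hypothetical helper) - decoding a PCI subsystem ID with the
// masks above, e.g. to recognize the single-port copper board.
static inline int sxg_ssid_is_copper(ushort ssid)
{
	return SSID_BASE(ssid) == SSID_SAHARA_COPPER;
}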
/*******************************************************************************
* HW Register Space
*******************************************************************************/
#define SXG_HWREG_MEMSIZE 0x4000 // 16k
#pragma pack(push, 1)
typedef struct _SXG_HW_REGS {
u32 Reset; // Write 0xdead to invoke soft reset
u32 Pad1; // No register defined at offset 4
u32 InterruptMask0; // Deassert legacy interrupt on function 0
u32 InterruptMask1; // Deassert legacy interrupt on function 1
u32 UcodeDataLow; // Store microcode instruction bits 31-0
u32 UcodeDataMiddle; // Store microcode instruction bits 63-32
u32 UcodeDataHigh; // Store microcode instruction bits 95-64
u32 UcodeAddr; // Store microcode address - See flags below
u32 PadTo0x80[24]; // Pad to Xcv configuration registers
u32 MacConfig0; // 0x80 - AXGMAC Configuration Register 0
u32 MacConfig1; // 0x84 - AXGMAC Configuration Register 1
u32 MacConfig2; // 0x88 - AXGMAC Configuration Register 2
u32 MacConfig3; // 0x8C - AXGMAC Configuration Register 3
u32 MacAddressLow; // 0x90 - AXGMAC MAC Station Address - octets 1-4
u32 MacAddressHigh; // 0x94 - AXGMAC MAC Station Address - octets 5-6
u32 MacReserved1[2]; // 0x98 - AXGMAC Reserved
u32 MacMaxFrameLen; // 0xA0 - AXGMAC Maximum Frame Length
u32 MacReserved2[2]; // 0xA4 - AXGMAC Reserved
u32 MacRevision; // 0xAC - AXGMAC Revision Level Register
u32 MacReserved3[4]; // 0xB0 - AXGMAC Reserved
u32 MacAmiimCmd; // 0xC0 - AXGMAC AMIIM Command Register
u32 MacAmiimField; // 0xC4 - AXGMAC AMIIM Field Register
u32 MacAmiimConfig; // 0xC8 - AXGMAC AMIIM Configuration Register
u32 MacAmiimLink; // 0xCC - AXGMAC AMIIM Link Fail Vector Register
u32 MacAmiimIndicator; // 0xD0 - AXGMAC AMIIM Indicator Register
u32 PadTo0x100[11]; // 0xD4 - 0x100 - Pad
u32 XmtConfig; // 0x100 - Transmit Configuration Register
u32 RcvConfig; // 0x104 - Receive Configuration Register 1
u32 LinkAddress0Low; // 0x108 - Link address 0
u32 LinkAddress0High; // 0x10C - Link address 0
u32 LinkAddress1Low; // 0x110 - Link address 1
u32 LinkAddress1High; // 0x114 - Link address 1
u32 LinkAddress2Low; // 0x118 - Link address 2
u32 LinkAddress2High; // 0x11C - Link address 2
u32 LinkAddress3Low; // 0x120 - Link address 3
u32 LinkAddress3High; // 0x124 - Link address 3
u32 ToeplitzKey[10]; // 0x128 - 0x150 - Toeplitz key
u32 SocketKey[10]; // 0x150 - 0x178 - Socket Key
u32 LinkStatus; // 0x178 - Link status
u32 ClearStats; // 0x17C - Clear Stats
u32 XmtErrorsLow; // 0x180 - Transmit stats - errors
u32 XmtErrorsHigh; // 0x184 - Transmit stats - errors
u32 XmtFramesLow; // 0x188 - Transmit stats - frame count
u32 XmtFramesHigh; // 0x18C - Transmit stats - frame count
u32 XmtBytesLow; // 0x190 - Transmit stats - byte count
u32 XmtBytesHigh; // 0x194 - Transmit stats - byte count
u32 XmtTcpSegmentsLow; // 0x198 - Transmit stats - TCP segments
u32 XmtTcpSegmentsHigh; // 0x19C - Transmit stats - TCP segments
u32 XmtTcpBytesLow; // 0x1A0 - Transmit stats - TCP bytes
u32 XmtTcpBytesHigh; // 0x1A4 - Transmit stats - TCP bytes
u32 RcvErrorsLow; // 0x1A8 - Receive stats - errors
u32 RcvErrorsHigh; // 0x1AC - Receive stats - errors
u32 RcvFramesLow; // 0x1B0 - Receive stats - frame count
u32 RcvFramesHigh; // 0x1B4 - Receive stats - frame count
u32 RcvBytesLow; // 0x1B8 - Receive stats - byte count
u32 RcvBytesHigh; // 0x1BC - Receive stats - byte count
u32 RcvTcpSegmentsLow; // 0x1C0 - Receive stats - TCP segments
u32 RcvTcpSegmentsHigh; // 0x1C4 - Receive stats - TCP segments
u32 RcvTcpBytesLow; // 0x1C8 - Receive stats - TCP bytes
u32 RcvTcpBytesHigh; // 0x1CC - Receive stats - TCP bytes
u32 PadTo0x200[12]; // 0x1D0 - 0x200 - Pad
u32 Software[1920]; // 0x200 - 0x2000 - Software defined (not used)
u32 MsixTable[1024]; // 0x2000 - 0x3000 - MSIX Table
u32 MsixBitArray[1024]; // 0x3000 - 0x4000 - MSIX Pending Bit Array
} SXG_HW_REGS, *PSXG_HW_REGS;
#pragma pack(pop)
// Microcode Address Flags
#define MICROCODE_ADDRESS_GO 0x80000000 // Start microcode
#define MICROCODE_ADDRESS_WRITE 0x40000000 // Store microcode
#define MICROCODE_ADDRESS_READ 0x20000000 // Read microcode
#define MICROCODE_ADDRESS_PARITY 0x10000000 // Parity error detected
#define MICROCODE_ADDRESS_MASK 0x00001FFF // Address bits
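// Hedged sketch (not the driver's actual download routine) of storing a
// single 96-bit microcode instruction at WCS address 'addr' using the
// register layout and flags above. 'regs' is assumed to be an
// ioremap()'d SXG_HW_REGS mapping; __iomem annotations and the final
// MICROCODE_ADDRESS_GO write are omitted for brevity.
static inline void sxg_store_ucode_word(PSXG_HW_REGS regs, u32 addr,
					 const u32 instr[3])
{
	writel(instr[0], &regs->UcodeDataLow);		// bits 31-0
	writel(instr[1], &regs->UcodeDataMiddle);	// bits 63-32
	writel(instr[2], &regs->UcodeDataHigh);		// bits 95-64
	writel((addr & MICROCODE_ADDRESS_MASK) | MICROCODE_ADDRESS_WRITE,
	       &regs->UcodeAddr);
}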
// Link Address Registers
#define LINK_ADDRESS_ENABLE 0x80000000 // Applied to link address high
// Microsoft register space size
#define SXG_UCODEREG_MEMSIZE 0x40000 // 256k
// Sahara microcode register address format. The command code,
// extended command code, and associated processor are encoded in
// the address bits as follows
#define SXG_ADDRESS_CODE_SHIFT 2 // Base command code
#define SXG_ADDRESS_CODE_MASK 0x0000003C
#define SXG_ADDRESS_EXCODE_SHIFT 6 // Extended (or sub) command code
#define SXG_ADDRESS_EXCODE_MASK 0x00001FC0
#define SXG_ADDRESS_CPUID_SHIFT 13 // CPU
#define SXG_ADDRESS_CPUID_MASK 0x0003E000
#define SXG_REGISTER_SIZE_PER_CPU 0x00002000 // Used to sanity check UCODE_REGS structure
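// Illustration - composing a per-CPU microcode register address from the
// base command code, extended command code and CPU id fields described
// above. The macro name is hypothetical.
#define SXG_UCODE_REG_ADDR(_Code, _ExCode, _Cpu)				\
	((((_Code) << SXG_ADDRESS_CODE_SHIFT) & SXG_ADDRESS_CODE_MASK) |	\
	 (((_ExCode) << SXG_ADDRESS_EXCODE_SHIFT) & SXG_ADDRESS_EXCODE_MASK) |	\
	 (((_Cpu) << SXG_ADDRESS_CPUID_SHIFT) & SXG_ADDRESS_CPUID_MASK))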
// Sahara receive sequencer status values
#define SXG_RCV_STATUS_ATTN 0x80000000 // Attention
#define SXG_RCV_STATUS_TRANSPORT_MASK 0x3F000000 // Transport mask
#define SXG_RCV_STATUS_TRANSPORT_ERROR 0x20000000 // Transport error
#define SXG_RCV_STATUS_TRANSPORT_CSUM 0x23000000 // Transport cksum error
#define SXG_RCV_STATUS_TRANSPORT_UFLOW 0x22000000 // Transport underflow
#define SXG_RCV_STATUS_TRANSPORT_HDRLEN 0x20000000 // Transport header length
#define SXG_RCV_STATUS_TRANSPORT_FLAGS 0x10000000 // Transport flags detected
#define SXG_RCV_STATUS_TRANSPORT_OPTS 0x08000000 // Transport options detected
#define SXG_RCV_STATUS_TRANSPORT_SESS_MASK 0x07000000 // Transport DDP
#define SXG_RCV_STATUS_TRANSPORT_DDP 0x06000000 // Transport DDP
#define SXG_RCV_STATUS_TRANSPORT_iSCSI 0x05000000 // Transport iSCSI
#define SXG_RCV_STATUS_TRANSPORT_NFS 0x04000000 // Transport NFS
#define SXG_RCV_STATUS_TRANSPORT_FTP 0x03000000 // Transport FTP
#define SXG_RCV_STATUS_TRANSPORT_HTTP 0x02000000 // Transport HTTP
#define SXG_RCV_STATUS_TRANSPORT_SMB 0x01000000 // Transport SMB
#define SXG_RCV_STATUS_NETWORK_MASK 0x00FF0000 // Network mask
#define SXG_RCV_STATUS_NETWORK_ERROR 0x00800000 // Network error
#define SXG_RCV_STATUS_NETWORK_CSUM 0x00830000 // Network cksum error
#define SXG_RCV_STATUS_NETWORK_UFLOW 0x00820000 // Network underflow error
#define SXG_RCV_STATUS_NETWORK_HDRLEN 0x00800000 // Network header length
#define SXG_RCV_STATUS_NETWORK_OFLOW 0x00400000 // Network overflow detected
#define SXG_RCV_STATUS_NETWORK_MCAST 0x00200000 // Network multicast detected
#define SXG_RCV_STATUS_NETWORK_OPTIONS 0x00100000 // Network options detected
#define SXG_RCV_STATUS_NETWORK_OFFSET 0x00080000 // Network offset detected
#define SXG_RCV_STATUS_NETWORK_FRAGMENT 0x00040000 // Network fragment detected
#define SXG_RCV_STATUS_NETWORK_TRANS_MASK 0x00030000 // Network transport type mask
#define SXG_RCV_STATUS_NETWORK_UDP 0x00020000 // UDP
#define SXG_RCV_STATUS_NETWORK_TCP 0x00010000 // TCP
#define SXG_RCV_STATUS_IPONLY 0x00008000 // IP-only not TCP
#define SXG_RCV_STATUS_PKT_PRI 0x00006000 // Receive priority
#define SXG_RCV_STATUS_PKT_PRI_SHFT 13 // Receive priority shift
#define SXG_RCV_STATUS_PARITY 0x00001000 // MAC Receive RAM parity error
#define SXG_RCV_STATUS_ADDRESS_MASK 0x00000F00 // Link address detection mask
#define SXG_RCV_STATUS_ADDRESS_D 0x00000B00 // Link address D
#define SXG_RCV_STATUS_ADDRESS_C 0x00000A00 // Link address C
#define SXG_RCV_STATUS_ADDRESS_B 0x00000900 // Link address B
#define SXG_RCV_STATUS_ADDRESS_A 0x00000800 // Link address A
#define SXG_RCV_STATUS_ADDRESS_BCAST 0x00000300 // Link address broadcast
#define SXG_RCV_STATUS_ADDRESS_MCAST 0x00000200 // Link address multicast
#define SXG_RCV_STATUS_ADDRESS_CMCAST 0x00000100 // Link control multicast
#define SXG_RCV_STATUS_LINK_MASK 0x000000FF // Link status mask
#define SXG_RCV_STATUS_LINK_ERROR 0x00000080 // Link error
#define SXG_RCV_STATUS_LINK_PARITY 0x00000087 // RcvMacQ parity error
#define SXG_RCV_STATUS_LINK_EARLY 0x00000086 // Data early
#define SXG_RCV_STATUS_LINK_BUFOFLOW 0x00000085 // Buffer overflow
#define SXG_RCV_STATUS_LINK_CODE 0x00000084 // Link code error
#define SXG_RCV_STATUS_LINK_DRIBBLE 0x00000083 // Dribble nibble
#define SXG_RCV_STATUS_LINK_CRC 0x00000082 // CRC error
#define SXG_RCV_STATUS_LINK_OFLOW 0x00000081 // Link overflow
#define SXG_RCV_STATUS_LINK_UFLOW 0x00000080 // Link underflow
#define SXG_RCV_STATUS_LINK_8023 0x00000020 // 802.3
#define SXG_RCV_STATUS_LINK_SNAP 0x00000010 // Snap
#define SXG_RCV_STATUS_LINK_VLAN 0x00000008 // VLAN
#define SXG_RCV_STATUS_LINK_TYPE_MASK 0x00000007 // Network type mask
#define SXG_RCV_STATUS_LINK_CONTROL 0x00000003 // Control packet
#define SXG_RCV_STATUS_LINK_IPV6 0x00000002 // IPv6 packet
#define SXG_RCV_STATUS_LINK_IPV4 0x00000001 // IPv4 packet
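// Example (hypothetical helper) - classifying a receive-status word with
// the masks above: detect a TCP frame whose transport checksum failed.
static inline int sxg_rcv_tcp_csum_bad(u32 status)
{
	return ((status & SXG_RCV_STATUS_NETWORK_TRANS_MASK) ==
		SXG_RCV_STATUS_NETWORK_TCP) &&
	       ((status & SXG_RCV_STATUS_TRANSPORT_MASK) ==
		SXG_RCV_STATUS_TRANSPORT_CSUM);
}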
/***************************************************************************
* Sahara receive and transmit configuration registers
***************************************************************************/
#define RCV_CONFIG_RESET 0x80000000 // RcvConfig register reset
#define RCV_CONFIG_ENABLE 0x40000000 // Enable the receive logic
#define RCV_CONFIG_ENPARSE 0x20000000 // Enable the receive parser
#define RCV_CONFIG_SOCKET 0x10000000 // Enable the socket detector
#define RCV_CONFIG_RCVBAD 0x08000000 // Receive all bad frames
#define RCV_CONFIG_CONTROL 0x04000000 // Receive all control frames
#define RCV_CONFIG_RCVPAUSE 0x02000000 // Enable pause transmit when attn
#define RCV_CONFIG_TZIPV6 0x01000000 // Include TCP port w/ IPv6 toeplitz
#define RCV_CONFIG_TZIPV4 0x00800000 // Include TCP port w/ IPv4 toeplitz
#define RCV_CONFIG_FLUSH 0x00400000 // Flush buffers
#define RCV_CONFIG_PRIORITY_MASK 0x00300000 // Priority level
#define RCV_CONFIG_HASH_MASK 0x00030000 // Hash depth
#define RCV_CONFIG_HASH_8 0x00000000 // Hash depth 8
#define RCV_CONFIG_HASH_16 0x00010000 // Hash depth 16
#define RCV_CONFIG_HASH_4 0x00020000 // Hash depth 4
#define RCV_CONFIG_HASH_2 0x00030000 // Hash depth 2
#define RCV_CONFIG_BUFLEN_MASK 0x0000FFF0 // Buffer length bits 15:4, i.e. a multiple of 16
#define RCV_CONFIG_SKT_DIS 0x00000008 // Disable socket detection on attn
// Macro to determine RCV_CONFIG_BUFLEN based on maximum frame size.
// We add 18 bytes for Sahara receive status and padding, plus 4 bytes for CRC,
// and round up to nearest 16 byte boundary
#define RCV_CONFIG_BUFSIZE(_MaxFrame) ((((_MaxFrame) + 22) + 15) & RCV_CONFIG_BUFLEN_MASK)
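// Worked example: for a standard 1518-byte maximum frame,
// RCV_CONFIG_BUFSIZE(1518) = ((1518 + 22) + 15) & 0xFFF0
//                          = 1555 & 0xFFF0 = 1552 bytes.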
#define XMT_CONFIG_RESET 0x80000000 // XmtConfig register reset
#define XMT_CONFIG_ENABLE 0x40000000 // Enable transmit logic
#define XMT_CONFIG_MAC_PARITY 0x20000000 // Inhibit MAC RAM parity error
#define XMT_CONFIG_BUF_PARITY 0x10000000 // Inhibit D2F buffer parity error
#define XMT_CONFIG_MEM_PARITY 0x08000000 // Inhibit 1T SRAM parity error
#define XMT_CONFIG_INVERT_PARITY 0x04000000 // Invert MAC RAM parity
#define XMT_CONFIG_INITIAL_IPID 0x0000FFFF // Initial IPID
/***************************************************************************
* A-XGMAC Registers - Occupy 0x80 - 0xD4 of the SXG_HW_REGS
*
* Full register descriptions can be found in axgmac.pdf
***************************************************************************/
// A-XGMAC Configuration Register 0
#define AXGMAC_CFG0_SUB_RESET 0x80000000 // Sub module reset
#define AXGMAC_CFG0_RCNTRL_RESET 0x00400000 // Receive control reset
#define AXGMAC_CFG0_RFUNC_RESET 0x00200000 // Receive function reset
#define AXGMAC_CFG0_TCNTRL_RESET 0x00040000 // Transmit control reset
#define AXGMAC_CFG0_TFUNC_RESET 0x00020000 // Transmit function reset
#define AXGMAC_CFG0_MII_RESET 0x00010000 // MII Management reset
// A-XGMAC Configuration Register 1
#define AXGMAC_CFG1_XMT_PAUSE 0x80000000 // Allow the sending of Pause frames
#define AXGMAC_CFG1_XMT_EN 0x40000000 // Enable transmit
#define AXGMAC_CFG1_RCV_PAUSE 0x20000000 // Allow the detection of Pause frames
#define AXGMAC_CFG1_RCV_EN 0x10000000 // Enable receive
#define AXGMAC_CFG1_XMT_STATE 0x04000000 // Current transmit state - READ ONLY
#define AXGMAC_CFG1_RCV_STATE 0x01000000 // Current receive state - READ ONLY
#define AXGMAC_CFG1_XOFF_SHORT 0x00001000 // Only pause for 64 slot on XOFF
#define AXGMAC_CFG1_XMG_FCS1 0x00000400 // Delay transmit FCS 1 4-byte word
#define AXGMAC_CFG1_XMG_FCS2 0x00000800 // Delay transmit FCS 2 4-byte words
#define AXGMAC_CFG1_XMG_FCS3 0x00000C00 // Delay transmit FCS 3 4-byte words
#define AXGMAC_CFG1_RCV_FCS1 0x00000100 // Delay receive FCS 1 4-byte word
#define AXGMAC_CFG1_RCV_FCS2 0x00000200 // Delay receive FCS 2 4-byte words
#define AXGMAC_CFG1_RCV_FCS3 0x00000300 // Delay receive FCS 3 4-byte words
#define AXGMAC_CFG1_PKT_OVERRIDE 0x00000080 // Per-packet override enable
#define AXGMAC_CFG1_SWAP 0x00000040 // Byte swap enable
#define AXGMAC_CFG1_SHORT_ASSERT 0x00000020 // ASSERT srdrpfrm on short frame (<64)
#define AXGMAC_CFG1_RCV_STRICT 0x00000010 // RCV only 802.3AE when CLEAR
#define AXGMAC_CFG1_CHECK_LEN 0x00000008 // Verify frame length
#define AXGMAC_CFG1_GEN_FCS 0x00000004 // Generate FCS
#define AXGMAC_CFG1_PAD_MASK 0x00000003 // Mask for pad bits
#define AXGMAC_CFG1_PAD_64 0x00000001 // Pad frames to 64 bytes
#define AXGMAC_CFG1_PAD_VLAN 0x00000002 // Detect VLAN and pad to 68 bytes
#define AXGMAC_CFG1_PAD_68 0x00000003 // Pad to 68 bytes
// A-XGMAC Configuration Register 2
#define AXGMAC_CFG2_GEN_PAUSE 0x80000000 // Generate single pause frame (test)
#define AXGMAC_CFG2_LF_MANUAL 0x08000000 // Manual link fault sequence
#define AXGMAC_CFG2_LF_AUTO 0x04000000 // Auto link fault sequence
#define AXGMAC_CFG2_LF_REMOTE 0x02000000 // Remote link fault (READ ONLY)
#define AXGMAC_CFG2_LF_LOCAL 0x01000000 // Local link fault (READ ONLY)
#define AXGMAC_CFG2_IPG_MASK 0x001F0000 // Inter packet gap
#define AXGMAC_CFG2_IPG_SHIFT 16
#define AXGMAC_CFG2_PAUSE_XMT 0x00008000 // Pause transmit module
#define AXGMAC_CFG2_IPG_EXTEN 0x00000020 // Enable IPG extension algorithm
#define AXGMAC_CFG2_IPGEX_MASK 0x0000001F // IPG extension
// A-XGMAC Configuration Register 3
#define AXGMAC_CFG3_RCV_DROP 0xFFFF0000 // Receive frame drop filter
#define AXGMAC_CFG3_RCV_DONT_CARE 0x0000FFFF // Receive frame don't care filter
// A-XGMAC Station Address Register - Octets 1-4
#define AXGMAC_SARLOW_OCTET_ONE 0xFF000000 // First octet
#define AXGMAC_SARLOW_OCTET_TWO 0x00FF0000 // Second octet
#define AXGMAC_SARLOW_OCTET_THREE 0x0000FF00 // Third octet
#define AXGMAC_SARLOW_OCTET_FOUR 0x000000FF // Fourth octet
// A-XGMAC Station Address Register - Octets 5-6
#define AXGMAC_SARHIGH_OCTET_FIVE 0xFF000000 // Fifth octet
#define AXGMAC_SARHIGH_OCTET_SIX 0x00FF0000 // Sixth octet
// A-XGMAC Maximum frame length register
#define AXGMAC_MAXFRAME_XMT 0x3FFF0000 // Maximum transmit frame length
#define AXGMAC_MAXFRAME_XMT_SHIFT 16
#define AXGMAC_MAXFRAME_RCV 0x0000FFFF // Maximum receive frame length
// This register doesn't need to be written for standard MTU.
// For jumbo, I'll just statically define the value here. This
// value sets the receive byte count to 9036 (0x234C) and the
// transmit WORD count to 2259 (0x8D3). These values include 22
// bytes of padding beyond the jumbo MTU of 9014
#define AXGMAC_MAXFRAME_JUMBO 0x08D3234C
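// Hedged illustration of how AXGMAC_MAXFRAME_JUMBO packs, assuming the
// transmit field is simply the same limit expressed in 4-byte words (as
// the jumbo values imply): 9014 + 22 = 9036 = 0x234C receive bytes, and
// 9036 / 4 = 2259 = 0x8D3 transmit words, so (0x8D3 << 16) | 0x234C
// yields 0x08D3234C. The macro name is hypothetical.
#define AXGMAC_MAXFRAME_VALUE(_RcvBytes)					\
	(((((_RcvBytes) / 4) << AXGMAC_MAXFRAME_XMT_SHIFT) & AXGMAC_MAXFRAME_XMT) | \
	 ((_RcvBytes) & AXGMAC_MAXFRAME_RCV))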
// A-XGMAC Revision level
#define AXGMAC_REVISION_MASK 0x0000FFFF // Revision level
// A-XGMAC AMIIM Command Register
#define AXGMAC_AMIIM_CMD_START 0x00000008 // Command start
#define AXGMAC_AMIIM_CMD_MASK 0x00000007 // Command
#define AXGMAC_AMIIM_CMD_LEGACY_WRITE 1 // 10/100/1000 Mbps Phy Write
#define AXGMAC_AMIIM_CMD_LEGACY_READ 2 // 10/100/1000 Mbps Phy Read
#define AXGMAC_AMIIM_CMD_MONITOR_SINGLE 3 // Monitor single PHY
#define AXGMAC_AMIIM_CMD_MONITOR_MULTIPLE 4 // Monitor multiple contiguous PHYs
#define AXGMAC_AMIIM_CMD_10G_OPERATION 5 // Present AMIIM Field Reg
#define AXGMAC_AMIIM_CMD_CLEAR_LINK_FAIL 6 // Clear Link Fail Bit in MIIM
// A-XGMAC AMIIM Field Register
#define AXGMAC_AMIIM_FIELD_ST 0xC0000000 // 2-bit ST field
#define AXGMAC_AMIIM_FIELD_ST_SHIFT 30
#define AXGMAC_AMIIM_FIELD_OP 0x30000000 // 2-bit OP field
#define AXGMAC_AMIIM_FIELD_OP_SHIFT 28
#define AXGMAC_AMIIM_FIELD_PORT_ADDR 0x0F800000 // Port address field (hstphyadx in spec)
#define AXGMAC_AMIIM_FIELD_PORT_SHIFT 23
#define AXGMAC_AMIIM_FIELD_DEV_ADDR 0x007C0000 // Device address field (hstregadx in spec)
#define AXGMAC_AMIIM_FIELD_DEV_SHIFT 18
#define AXGMAC_AMIIM_FIELD_TA 0x00030000 // 2-bit TA field
#define AXGMAC_AMIIM_FIELD_TA_SHIFT 16
#define AXGMAC_AMIIM_FIELD_DATA 0x0000FFFF // Data field
// Values for the AXGMAC_AMIIM_FIELD_OP field in the A-XGMAC AMIIM Field Register
#define MIIM_OP_ADDR 0 // MIIM Address set operation
#define MIIM_OP_WRITE 1 // MIIM Write register operation
#define MIIM_OP_READ 2 // MIIM Read register operation
#define MIIM_OP_ADDR_SHIFT (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT)
// Values for the AXGMAC_AMIIM_FIELD_PORT_ADDR field in the A-XGMAC AMIIM Field Register
#define MIIM_PORT_NUM 1 // All Sahara MIIM modules use port 1
// Values for the AXGMAC_AMIIM_FIELD_DEV_ADDR field in the A-XGMAC AMIIM Field Register
#define MIIM_DEV_PHY_PMA 1 // PHY PMA/PMD module MIIM device number
#define MIIM_DEV_PHY_PCS 3 // PHY PCS module MIIM device number
#define MIIM_DEV_PHY_XS 4 // PHY XS module MIIM device number
#define MIIM_DEV_XGXS 5 // XGXS MIIM device number
// Values for the AXGMAC_AMIIM_FIELD_TA field in the A-XGMAC AMIIM Field Register
#define MIIM_TA_10GB 2 // set to 2 for 10 GB operation
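// Hedged sketch - composing an AMIIM Field value for a clause-45 MDIO
// address cycle to a register in the PHY PMA/PMD device, using the
// shift/mask definitions above. The helper name is hypothetical.
static inline u32 sxg_amiim_pma_addr_cycle(u32 reg_addr)
{
	return (MIIM_PORT_NUM << AXGMAC_AMIIM_FIELD_PORT_SHIFT) |
	       (MIIM_DEV_PHY_PMA << AXGMAC_AMIIM_FIELD_DEV_SHIFT) |
	       (MIIM_TA_10GB << AXGMAC_AMIIM_FIELD_TA_SHIFT) |
	       (MIIM_OP_ADDR << AXGMAC_AMIIM_FIELD_OP_SHIFT) |
	       (reg_addr & AXGMAC_AMIIM_FIELD_DATA);
}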
// A-XGMAC AMIIM Configuration Register
#define AXGMAC_AMIIM_CFG_NOPREAM 0x00000080 // Bypass preamble of mngmt frame
#define AXGMAC_AMIIM_CFG_HALF_CLOCK 0x0000007F // half-clock duration of MDC output
// A-XGMAC AMIIM Indicator Register
#define AXGMAC_AMIIM_INDC_LINK 0x00000010 // Link status from legacy PHY or MMD
#define AXGMAC_AMIIM_INDC_MPHY 0x00000008 // Multiple phy operation in progress
#define AXGMAC_AMIIM_INDC_SPHY 0x00000004 // Single phy operation in progress
#define AXGMAC_AMIIM_INDC_MON 0x00000002 // Single or multiple monitor cmd
#define AXGMAC_AMIIM_INDC_BUSY 0x00000001 // Set until cmd operation complete
// Link Status and Control Register
#define LS_PHY_CLR_RESET 0x80000000 // Clear reset signal to PHY
#define LS_SERDES_POWER_DOWN 0x40000000 // Power down the Sahara Serdes
#define LS_XGXS_ENABLE 0x20000000 // Enable the XAUI XGXS logic
#define LS_XGXS_CTL 0x10000000 // Hold XAUI XGXS logic reset until Serdes is up
#define LS_SERDES_DOWN 0x08000000 // When 0, XAUI Serdes is up and initialization is complete
#define LS_TRACE_DOWN 0x04000000 // When 0, Trace Serdes is up and initialization is complete
#define LS_PHY_CLK_25MHZ 0x02000000 // Set PHY clock to 25 MHz (else 156.125 MHz)
#define LS_PHY_CLK_EN 0x01000000 // Enable clock to PHY
#define LS_XAUI_LINK_UP 0x00000010 // XAUI link is up
#define LS_XAUI_LINK_CHNG 0x00000008 // XAUI link status has changed
#define LS_LINK_ALARM 0x00000004 // Link alarm pin
#define LS_ATTN_CTRL_MASK 0x00000003 // Mask link attention control bits
#define LS_ATTN_ALARM 0x00000000 // 00 => Attn on link alarm
#define LS_ATTN_ALARM_OR_STAT_CHNG 0x00000001 // 01 => Attn on link alarm or status change
#define LS_ATTN_STAT_CHNG 0x00000002 // 10 => Attn on link status change
#define LS_ATTN_NONE 0x00000003 // 11 => no Attn
// Link Address High Registers
#define LINK_ADDR_ENABLE 0x80000000 // Enable this link address
/***************************************************************************
* XGXS XAUI XGMII Extender registers
*
* Full register descriptions can be found in mxgxs.pdf
***************************************************************************/
// XGXS Register Map
#define XGXS_ADDRESS_CONTROL1 0x0000 // XS Control 1
#define XGXS_ADDRESS_STATUS1 0x0001 // XS Status 1
#define XGXS_ADDRESS_DEVID_LOW 0x0002 // XS Device ID (low)
#define XGXS_ADDRESS_DEVID_HIGH 0x0003 // XS Device ID (high)
#define XGXS_ADDRESS_SPEED 0x0004 // XS Speed ability
#define XGXS_ADDRESS_DEV_LOW 0x0005 // XS Devices in package
#define XGXS_ADDRESS_DEV_HIGH 0x0006 // XS Devices in package
#define XGXS_ADDRESS_STATUS2 0x0008 // XS Status 2
#define XGXS_ADDRESS_PKGID_LOW 0x000E // XS Package Identifier
#define XGXS_ADDRESS_PKGID_HIGH 0x000F // XS Package Identifier
#define XGXS_ADDRESS_LANE_STATUS 0x0018 // 10G XGXS Lane Status
#define XGXS_ADDRESS_TEST_CTRL 0x0019 // 10G XGXS Test Control
#define XGXS_ADDRESS_RESET_LO1 0x8000 // Vendor-Specific Reset Lo 1
#define XGXS_ADDRESS_RESET_LO2 0x8001 // Vendor-Specific Reset Lo 2
#define XGXS_ADDRESS_RESET_HI1 0x8002 // Vendor-Specific Reset Hi 1
#define XGXS_ADDRESS_RESET_HI2 0x8003 // Vendor-Specific Reset Hi 2
// XS Control 1 register bit definitions
#define XGXS_CONTROL1_RESET 0x8000 // Reset - self clearing
#define XGXS_CONTROL1_LOOPBACK 0x4000 // Enable loopback
#define XGXS_CONTROL1_SPEED1 0x2000 // 0 = unspecified, 1 = 10Gb+
#define XGXS_CONTROL1_LOWPOWER 0x0400 // 1 = Low power mode
#define XGXS_CONTROL1_SPEED2 0x0040 // Same as SPEED1 (?)
#define XGXS_CONTROL1_SPEED 0x003C // Everything reserved except zero (?)
// XS Status 1 register bit definitions
#define XGXS_STATUS1_FAULT 0x0080 // Fault detected
#define XGXS_STATUS1_LINK 0x0004 // 1 = Link up
#define XGXS_STATUS1_LOWPOWER 0x0002 // 1 = Low power supported
// XS Speed register bit definitions
#define XGXS_SPEED_10G 0x0001 // 1 = 10G capable
// XS Devices register bit definitions
#define XGXS_DEVICES_DTE 0x0020 // DTE XS Present
#define XGXS_DEVICES_PHY 0x0010 // PHY XS Present
#define XGXS_DEVICES_PCS 0x0008 // PCS Present
#define XGXS_DEVICES_WIS 0x0004 // WIS Present
#define XGXS_DEVICES_PMD 0x0002 // PMD/PMA Present
#define XGXS_DEVICES_CLAUSE22 0x0001 // Clause 22 registers present
// XS Devices High register bit definitions
#define XGXS_DEVICES_VENDOR2 0x8000 // Vendor specific device 2
#define XGXS_DEVICES_VENDOR1 0x4000 // Vendor specific device 1
// XS Status 2 register bit definitions
#define XGXS_STATUS2_DEV_MASK 0xC000 // Device present mask
#define XGXS_STATUS2_DEV_RESPOND 0x8000 // Device responding
#define XGXS_STATUS2_XMT_FAULT 0x0800 // Transmit fault
#define XGXS_STATUS2_RCV_FAULT 0x0400 // Receive fault
// XS Package ID High register bit definitions
#define XGXS_PKGID_HIGH_ORG 0xFC00 // Organizationally Unique
#define XGXS_PKGID_HIGH_MFG 0x03F0 // Manufacturer Model
#define XGXS_PKGID_HIGH_REV 0x000F // Revision Number
// XS Lane Status register bit definitions
#define XGXS_LANE_PHY 0x1000 // PHY/DTE lane alignment status
#define XGXS_LANE_PATTERN 0x0800 // Pattern testing ability
#define XGXS_LANE_LOOPBACK 0x0400 // PHY loopback ability
#define XGXS_LANE_SYNC3 0x0008 // Lane 3 sync
#define XGXS_LANE_SYNC2 0x0004 // Lane 2 sync
#define XGXS_LANE_SYNC1 0x0002 // Lane 1 sync
#define XGXS_LANE_SYNC0 0x0001 // Lane 0 sync
// XS Test Control register bit definitions
#define XGXS_TEST_PATTERN_ENABLE 0x0004 // Test pattern enabled
#define XGXS_TEST_PATTERN_MASK 0x0003 // Test patterns
#define XGXS_TEST_PATTERN_RSVD 0x0003 // Test pattern - reserved
#define XGXS_TEST_PATTERN_MIX 0x0002 // Test pattern - mixed
#define XGXS_TEST_PATTERN_LOW 0x0001 // Test pattern - low
#define XGXS_TEST_PATTERN_HIGH 0x0000 // Test pattern - high
/***************************************************************************
* External MDIO Bus Registers
*
* Full register descriptions can be found in PHY/XENPAK/IEEE specs
***************************************************************************/
// LASI (Link Alarm Status Interrupt) Registers (located in MIIM_DEV_PHY_PMA device)
#define LASI_RX_ALARM_CONTROL 0x9000 // LASI RX_ALARM Control
#define LASI_TX_ALARM_CONTROL 0x9001 // LASI TX_ALARM Control
#define LASI_CONTROL 0x9002 // LASI Control
#define LASI_RX_ALARM_STATUS 0x9003 // LASI RX_ALARM Status
#define LASI_TX_ALARM_STATUS 0x9004 // LASI TX_ALARM Status
#define LASI_STATUS 0x9005 // LASI Status
// LASI_CONTROL bit definitions
#define LASI_CTL_RX_ALARM_ENABLE 0x0004 // Enable RX_ALARM interrupts
#define LASI_CTL_TX_ALARM_ENABLE 0x0002 // Enable TX_ALARM interrupts
#define LASI_CTL_LS_ALARM_ENABLE 0x0001 // Enable Link Status interrupts
// LASI_STATUS bit definitions
#define LASI_STATUS_RX_ALARM 0x0004 // RX_ALARM status
#define LASI_STATUS_TX_ALARM 0x0002 // TX_ALARM status
#define LASI_STATUS_LS_ALARM 0x0001 // Link Status
// PHY registers - PMA/PMD (device 1)
#define PHY_PMA_CONTROL1 0x0000 // PMA/PMD Control 1
#define PHY_PMA_STATUS1 0x0001 // PMA/PMD Status 1
#define PHY_PMA_RCV_DET 0x000A // PMA/PMD Receive Signal Detect
// other PMA/PMD registers exist and can be defined as needed
// PHY registers - PCS (device 3)
#define PHY_PCS_CONTROL1 0x0000 // PCS Control 1
#define PHY_PCS_STATUS1 0x0001 // PCS Status 1
#define PHY_PCS_10G_STATUS1 0x0020 // PCS 10GBASE-R Status 1
// other PCS registers exist and can be defined as needed
// PHY registers - XS (device 4)
#define PHY_XS_CONTROL1 0x0000 // XS Control 1
#define PHY_XS_STATUS1 0x0001 // XS Status 1
#define PHY_XS_LANE_STATUS 0x0018 // XS Lane Status
// other XS registers exist and can be defined as needed
// PHY_PMA_CONTROL1 register bit definitions
#define PMA_CONTROL1_RESET 0x8000 // PMA/PMD reset
// PHY_PMA_RCV_DET register bit definitions
#define PMA_RCV_DETECT 0x0001 // PMA/PMD receive signal detect
// PHY_PCS_10G_STATUS1 register bit definitions
#define PCS_10B_BLOCK_LOCK 0x0001 // PCS 10GBASE-R locked to receive blocks
// PHY_XS_LANE_STATUS register bit definitions
#define XS_LANE_ALIGN 0x1000 // XS transmit lanes aligned
// PHY Microcode download data structure
typedef struct _PHY_UCODE {
ushort Addr;
ushort Data;
} PHY_UCODE, *PPHY_UCODE;
/*****************************************************************************
* Transmit Sequencer Command Descriptor definitions
*****************************************************************************/
// This descriptor must be placed in GRAM. The address of this descriptor
// (along with a couple of control bits) is pushed onto the PxhCmdQ or PxlCmdQ
// (Proxy high or low command queue). This data is read by the Proxy Sequencer,
// which pushes it onto the XmtCmdQ, which is (eventually) read by the Transmit
// Sequencer, causing a packet to be transmitted. Not all fields are valid for
// all commands - see the Sahara spec for details. Note that this structure is
// only valid when compiled on a little endian machine.
#pragma pack(push, 1)
typedef struct _XMT_DESC {
ushort XmtLen; // word 0, bits [15:0] - transmit length
unsigned char XmtCtl; // word 0, bits [23:16] - transmit control byte
unsigned char Cmd; // word 0, bits [31:24] - transmit command plus misc.
u32 XmtBufId; // word 1, bits [31:0] - transmit buffer ID
unsigned char TcpStrt; // word 2, bits [7:0] - byte address of TCP header
unsigned char IpStrt; // word 2, bits [15:8] - byte address of IP header
ushort IpCkSum; // word 2, bits [31:16] - partial IP checksum
ushort TcpCkSum; // word 3, bits [15:0] - partial TCP checksum
ushort Rsvd1; // word 3, bits [31:16] - PAD
u32 Rsvd2; // word 4, bits [31:0] - PAD
u32 Rsvd3; // word 5, bits [31:0] - PAD
u32 Rsvd4; // word 6, bits [31:0] - PAD
u32 Rsvd5; // word 7, bits [31:0] - PAD
} XMT_DESC, *PXMT_DESC;
#pragma pack(pop)
// XMT_DESC Cmd byte definitions
// command codes
#define XMT_DESC_CMD_RAW_SEND 0 // raw send descriptor
#define XMT_DESC_CMD_CSUM_INSERT 1 // checksum insert descriptor
#define XMT_DESC_CMD_FORMAT 2 // format descriptor
#define XMT_DESC_CMD_PRIME 3 // prime descriptor
#define XMT_DESC_CMD_CODE_SHFT 6 // command code shift (shift to bits [31:30] in word 0)
// shifted command codes
#define XMT_RAW_SEND (XMT_DESC_CMD_RAW_SEND << XMT_DESC_CMD_CODE_SHFT)
#define XMT_CSUM_INSERT (XMT_DESC_CMD_CSUM_INSERT << XMT_DESC_CMD_CODE_SHFT)
#define XMT_FORMAT (XMT_DESC_CMD_FORMAT << XMT_DESC_CMD_CODE_SHFT)
#define XMT_PRIME (XMT_DESC_CMD_PRIME << XMT_DESC_CMD_CODE_SHFT)
// XMT_DESC Control Byte (XmtCtl) definitions
// NOTE: These bits do not work on Sahara (Rev A)!
#define XMT_CTL_PAUSE_FRAME 0x80 // current frame is a pause control frame (for statistics)
#define XMT_CTL_CONTROL_FRAME 0x40 // current frame is a control frame (for statistics)
#define XMT_CTL_PER_PKT_QUAL 0x20 // per packet qualifier
#define XMT_CTL_PAD_MODE_NONE 0x00 // do not pad frame
#define XMT_CTL_PAD_MODE_64 0x08 // pad frame to 64 bytes
#define XMT_CTL_PAD_MODE_VLAN_68 0x10 // pad frame to 64 bytes, and VLAN frames to 68 bytes
#define XMT_CTL_PAD_MODE_68 0x18 // pad frame to 68 bytes
#define XMT_CTL_GEN_FCS 0x04 // generate FCS (CRC) for this frame
#define XMT_CTL_DELAY_FCS_0 0x00 // do not delay FCS calculation
#define XMT_CTL_DELAY_FCS_1 0x01 // delay FCS calculation by 1 (4-byte) word
#define XMT_CTL_DELAY_FCS_2 0x02 // delay FCS calculation by 2 (4-byte) words
#define XMT_CTL_DELAY_FCS_3 0x03 // delay FCS calculation by 3 (4-byte) words
// XMT_DESC XmtBufId definition
#define XMT_BUF_ID_SHFT 8 // The Xmt buffer ID is formed by dividing
// the buffer (DRAM) address by 256 (or << 8)
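// Illustrative only - filling in a minimal raw-send transmit descriptor.
// 'buf_dram_addr' is the card-DRAM address of the staged frame; the
// helper name and calling convention are hypothetical, and XmtCtl is
// left zero per the Rev A note above.
static inline void sxg_build_raw_xmt_desc(PXMT_DESC desc, u32 buf_dram_addr,
					  ushort len)
{
	memset(desc, 0, sizeof(*desc));			// clear PAD/reserved words
	desc->XmtLen   = len;				// transmit length
	desc->Cmd      = XMT_RAW_SEND;			// raw send command code
	desc->XmtBufId = buf_dram_addr >> XMT_BUF_ID_SHFT; // DRAM address / 256
}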
/*****************************************************************************
* Receiver Sequencer Definitions
*****************************************************************************/
// Receive Event Queue (queues 3 - 6) bit definitions
#define RCV_EVTQ_RBFID_MASK 0x0000FFFF // bit mask for the Receive Buffer ID
// Receive Buffer ID definition
#define RCV_BUF_ID_SHFT 5 // The Rcv buffer ID is formed by dividing
// the buffer (DRAM) address by 32 (or << 5)
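// Example (hypothetical helper) - recovering the card-DRAM address of a
// receive buffer from a receive event queue entry using the definitions
// above.
static inline u32 sxg_rcv_event_to_buf_addr(u32 event)
{
	return (event & RCV_EVTQ_RBFID_MASK) << RCV_BUF_ID_SHFT;
}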
// Format of the 18 byte Receive Buffer returned by the
// Receive Sequencer for received packets
#pragma pack(push, 1)
typedef struct _RCV_BUF_HDR {
u32 Status; // Status word from Rcv Seq Parser
ushort Length; // Rcv packet byte count
union {
ushort TcpCsum; // TCP checksum
struct {
unsigned char TcpCsumL; // lower 8 bits of the TCP checksum
unsigned char LinkHash; // Link hash (multicast frames only)
};
};
ushort SktHash; // Socket hash
unsigned char TcpHdrOffset; // TCP header offset into packet
unsigned char IpHdrOffset; // IP header offset into packet
u32 TpzHash; // Toeplitz hash
ushort Reserved; // Reserved
} RCV_BUF_HDR, *PRCV_BUF_HDR;
#pragma pack(pop)
/*****************************************************************************
* Queue definitions
*****************************************************************************/
// Ingress (read only) queue numbers
#define PXY_BUF_Q 0 // Proxy Buffer Queue
#define HST_EVT_Q 1 // Host Event Queue
#define XMT_BUF_Q 2 // Transmit Buffer Queue
#define SKT_EVL_Q 3 // RcvSqr Socket Event Low Priority Queue
#define RCV_EVL_Q 4 // RcvSqr Rcv Event Low Priority Queue
#define SKT_EVH_Q 5 // RcvSqr Socket Event High Priority Queue
#define RCV_EVH_Q 6 // RcvSqr Rcv Event High Priority Queue
#define DMA_RSP_Q 7 // Dma Response Queue - one per CPU context
// Local (read/write) queue numbers
#define LOCAL_A_Q 8 // Spare local Queue
#define LOCAL_B_Q 9 // Spare local Queue
#define LOCAL_C_Q 10 // Spare local Queue
#define FSM_EVT_Q 11 // Finite-State-Machine Event Queue
#define SBF_PAL_Q 12 // System Buffer Physical Address (low) Queue
#define SBF_PAH_Q 13 // System Buffer Physical Address (high) Queue
#define SBF_VAL_Q 14 // System Buffer Virtual Address (low) Queue
#define SBF_VAH_Q 15 // System Buffer Virtual Address (high) Queue
// Egress (write only) queue numbers
#define H2G_CMD_Q 16 // Host to GlbRam DMA Command Queue
#define H2D_CMD_Q 17 // Host to DRAM DMA Command Queue
#define G2H_CMD_Q 18 // GlbRam to Host DMA Command Queue
#define G2D_CMD_Q 19 // GlbRam to DRAM DMA Command Queue
#define D2H_CMD_Q 20 // DRAM to Host DMA Command Queue
#define D2G_CMD_Q 21 // DRAM to GlbRam DMA Command Queue
#define D2D_CMD_Q 22 // DRAM to DRAM DMA Command Queue
#define PXL_CMD_Q 23 // Low Priority Proxy Command Queue
#define PXH_CMD_Q 24 // High Priority Proxy Command Queue
#define RSQ_CMD_Q 25 // Receive Sequencer Command Queue
#define RCV_BUF_Q 26 // Receive Buffer Queue
// Bit definitions for the Proxy Command queues (PXL_CMD_Q and PXH_CMD_Q)
#define PXY_COPY_EN 0x00200000 // enable copy of xmt descriptor to xmt command queue
#define PXY_SIZE_16 0x00000000 // copy 16 bytes
#define PXY_SIZE_32 0x00100000 // copy 32 bytes
/*****************************************************************************
* SXG EEPROM/Flash Configuration Definitions
*****************************************************************************/
#pragma pack(push, 1)
//
typedef struct _HW_CFG_DATA {
ushort Addr;
union {
ushort Data;
ushort Checksum;
};
} HW_CFG_DATA, *PHW_CFG_DATA;
//
#define NUM_HW_CFG_ENTRIES ((128/sizeof(HW_CFG_DATA)) - 4)
// MAC address
typedef struct _SXG_CONFIG_MAC {
unsigned char MacAddr[6]; // MAC Address
} SXG_CONFIG_MAC, *PSXG_CONFIG_MAC;
//
typedef struct _ATK_FRU {
unsigned char PartNum[6];
unsigned char Revision[2];
unsigned char Serial[14];
} ATK_FRU, *PATK_FRU;
// OEM FRU Format types
#define ATK_FRU_FORMAT 0x0000
#define CPQ_FRU_FORMAT 0x0001
#define DELL_FRU_FORMAT 0x0002
#define HP_FRU_FORMAT 0x0003
#define IBM_FRU_FORMAT 0x0004
#define EMC_FRU_FORMAT 0x0005
#define NO_FRU_FORMAT 0xFFFF
// EEPROM/Flash Format
typedef struct _SXG_CONFIG {
//
// Section 1 (128 bytes)
//
ushort MagicWord; // EEPROM/FLASH Magic code 'A5A5'
ushort SpiClks; // SPI bus clock dividers
HW_CFG_DATA HwCfg[NUM_HW_CFG_ENTRIES];
//
//
//
ushort Version; // EEPROM format version
SXG_CONFIG_MAC MacAddr[4]; // space for 4 MAC addresses
ATK_FRU AtkFru; // FRU information
ushort OemFruFormat; // OEM FRU format type
unsigned char OemFru[76]; // OEM FRU information (optional)
ushort Checksum; // Checksum of section 2
// CS info XXXTODO
} SXG_CONFIG, *PSXG_CONFIG;
#pragma pack(pop)
/*****************************************************************************
* Miscellaneous Hardware definitions
*****************************************************************************/
// Sahara (ASIC level) defines
#define SAHARA_GRAM_SIZE 0x020000 // GRAM size - 128 KB
#define SAHARA_DRAM_SIZE 0x200000 // DRAM size - 2 MB
#define SAHARA_QRAM_SIZE 0x004000 // QRAM size - 16K entries (64 KB)
#define SAHARA_WCS_SIZE 0x002000 // WCS - 8K instructions (x 108 bits)
// Arabia (board level) defines
#define FLASH_SIZE 0x080000 // 512 KB (4 Mb)
#define EEPROM_SIZE_XFMR 512 // true EEPROM size (bytes), including xfmr area
#define EEPROM_SIZE_NO_XFMR 256 // EEPROM size excluding xfmr area
/*
* Copyright (C) 1997-2008 Alacritech, Inc. All rights reserved
*
* sxgphycode.h:
*
 * This file contains PHY microcode and register initialization data.
*/
/**********************************************************************
* PHY Microcode
*
* The following contains both PHY microcode and PHY register
* initialization data. It is specific to both the PHY and the
* type of transceiver.
*
**********************************************************************/
/*
* Download for AEL2005C PHY with SR/LR transceiver (10GBASE-SR or 10GBASE-LR)
*/
static PHY_UCODE PhyUcode[] = {
/*
* NOTE: An address of 0 is a special case. When the download routine
* sees an address of 0, it does not write to the PHY. Instead, it
* delays the download. The length of the delay (in ms) is given in
* the data field.
*
* Delays are required at certain points.
*/
/*
* Platform-specific MDIO Patches:
 * (include patches for 10G RX polarity flip, 50 MHz synth, etc.)
*/
/* Addr, Data */
{0xc017, 0xfeb0}, /* flip RX_LOS polarity (mandatory */
/* patch for SFP+ applications) */
{0xC001, 0x0428}, /* flip RX serial polarity */
{0xc013, 0xf341}, /* invert lxmit clock (mandatory patch) */
{0xc210, 0x8000}, /* reset datapath (mandatory patch) */
{0xc210, 0x8100}, /* reset datapath (mandatory patch) */
{0xc210, 0x8000}, /* reset datapath (mandatory patch) */
{0xc210, 0x0000}, /* reset datapath (mandatory patch) */
{0x0000, 0x0032}, /* wait for 50ms for datapath reset to */
/* complete. (mandatory patch) */
/* Configure the LED's */
{0xc214, 0x0099}, /* configure the LED drivers */
{0xc216, 0x5f5f}, /* configure the Activity LED */
{0xc217, 0x33ff}, /* configure the Link LED */
/* Transceiver-specific MDIO Patches: */
{0xc010, 0x448a}, /* (bit 14) mask out high BER input from the */
/* LOS signal in 1.000A */
/* (mandatory patch for SR code)*/
{0xc003, 0x0181}, /* (bit 7) enable the CDR inc setting in */
/* 1.C005 (mandatory patch for SR code) */
/* Transceiver-specific Microcontroller Initialization: */
{0xc04a, 0x5200}, /* activate microcontroller and pause */
{0x0000, 0x0032}, /* wait 50ms for microcontroller before */
/* writing in code. */
/* code block starts here: */
{0xcc00, 0x2009},
{0xcc01, 0x3009},
{0xcc02, 0x27ff},
{0xcc03, 0x300f},
{0xcc04, 0x200c},
{0xcc05, 0x300c},
{0xcc06, 0x20c4},
{0xcc07, 0x3c04},
{0xcc08, 0x6437},
{0xcc09, 0x20c4},
{0xcc0a, 0x3c04},
{0xcc0b, 0x6437},
{0xcc0c, 0x25c4},
{0xcc0d, 0x3c54},
{0xcc0e, 0x6724},
{0xcc0f, 0x25c4},
{0xcc10, 0x3c54},
{0xcc11, 0x6724},
{0xcc12, 0x2042},
{0xcc13, 0x3012},
{0xcc14, 0x1002},
{0xcc15, 0x2482},
{0xcc16, 0x3012},
{0xcc17, 0x1002},
{0xcc18, 0x2a32},
{0xcc19, 0x3002},
{0xcc1a, 0x1002},
{0xcc1b, 0x200d},
{0xcc1c, 0x304d},
{0xcc1d, 0x2862},
{0xcc1e, 0x3012},
{0xcc1f, 0x1002},
{0xcc20, 0x2982},
{0xcc21, 0x3002},
{0xcc22, 0x1002},
{0xcc23, 0x628f},
{0xcc24, 0x20a4},
{0xcc25, 0x3004},
{0xcc26, 0x6438},
{0xcc27, 0x20a4},
{0xcc28, 0x3004},
{0xcc29, 0x6438},
{0xcc2a, 0x2015},
{0xcc2b, 0x3005},
{0xcc2c, 0x5853},
{0xcc2d, 0x2bd2},
{0xcc2e, 0x3002},
{0xcc2f, 0x1342},
{0xcc30, 0x200c},
{0xcc31, 0x300c},
{0xcc32, 0x2ff7},
{0xcc33, 0x30f7},
{0xcc34, 0x20c4},
{0xcc35, 0x3c04},
{0xcc36, 0x6724},
{0xcc37, 0x20c4},
{0xcc38, 0x3c04},
{0xcc39, 0x6724},
{0xcc3a, 0x2d32},
{0xcc3b, 0x3002},
{0xcc3c, 0x1002},
{0xcc3d, 0x2008},
{0xcc3e, 0x3008},
{0xcc3f, 0x5c83},
{0xcc40, 0x2d52},
{0xcc41, 0x3002},
{0xcc42, 0x1352},
{0xcc43, 0x2008},
{0xcc44, 0x3008},
{0xcc45, 0x5c83},
{0xcc46, 0x2d32},
{0xcc47, 0x3002},
{0xcc48, 0x1352},
{0xcc49, 0x201c},
{0xcc4a, 0x300c},
{0xcc4b, 0x200d},
{0xcc4c, 0x310d},
{0xcc4d, 0x2862},
{0xcc4e, 0x3012},
{0xcc4f, 0x1002},
{0xcc50, 0x2ed2},
{0xcc51, 0x3002},
{0xcc52, 0x1342},
{0xcc53, 0x6f72},
{0xcc54, 0x1002},
{0xcc55, 0x628f},
{0xcc56, 0x2514},
{0xcc57, 0x3c64},
{0xcc58, 0x6436},
{0xcc59, 0x2514},
{0xcc5a, 0x3c64},
{0xcc5b, 0x6436},
{0xcc5c, 0x2fa4},
{0xcc5d, 0x3cd4},
{0xcc5e, 0x6624},
{0xcc5f, 0x2fa4},
{0xcc60, 0x3cd4},
{0xcc61, 0x6624},
{0xcc62, 0x2f45},
{0xcc63, 0x3015},
{0xcc64, 0x5653},
{0xcc65, 0x2eb2},
{0xcc66, 0x3002},
{0xcc67, 0x13d2},
{0xcc68, 0x2ed2},
{0xcc69, 0x3002},
{0xcc6a, 0x1002},
{0xcc6b, 0x6f72},
{0xcc6c, 0x1002},
{0xcc6d, 0x628f},
{0xcc6e, 0x2602},
{0xcc6f, 0x3012},
{0xcc70, 0x1002},
{0xcc71, 0x200d},
{0xcc72, 0x320d},
{0xcc73, 0x2862},
{0xcc74, 0x3012},
{0xcc75, 0x1002},
{0xcc76, 0x25c4},
{0xcc77, 0x3c54},
{0xcc78, 0x6437},
{0xcc79, 0x25c4},
{0xcc7a, 0x3c54},
{0xcc7b, 0x6437},
{0xcc7c, 0x20c4},
{0xcc7d, 0x3c04},
{0xcc7e, 0x6724},
{0xcc7f, 0x20c4},
{0xcc80, 0x3c04},
{0xcc81, 0x6724},
{0xcc82, 0x6f72},
{0xcc83, 0x1002},
{0xcc84, 0x628f},
{0xcc85, 0x26f2},
{0xcc86, 0x3012},
{0xcc87, 0x1002},
{0xcc88, 0xc503},
{0xcc89, 0xd5d5},
{0xcc8a, 0xc600},
{0xcc8b, 0x2a6d},
{0xcc8c, 0xc601},
{0xcc8d, 0x2a4c},
{0xcc8e, 0xc602},
{0xcc8f, 0x0111},
{0xcc90, 0xc60c},
{0xcc91, 0x5900},
{0xcc92, 0xc710},
{0xcc93, 0x0700},
{0xcc94, 0xc718},
{0xcc95, 0x0700},
{0xcc96, 0xc720},
{0xcc97, 0x4700},
{0xcc98, 0xc801},
{0xcc99, 0x7f50},
{0xcc9a, 0xc802},
{0xcc9b, 0x7760},
{0xcc9c, 0xc803},
{0xcc9d, 0x7fce},
{0xcc9e, 0xc804},
{0xcc9f, 0x5700},
{0xcca0, 0xc805},
{0xcca1, 0x5f11},
{0xcca2, 0xc806},
{0xcca3, 0x4751},
{0xcca4, 0xc807},
{0xcca5, 0x57e1},
{0xcca6, 0xc808},
{0xcca7, 0x2700},
{0xcca8, 0xc809},
{0xcca9, 0x0000},
{0xccaa, 0xc821},
{0xccab, 0x0002},
{0xccac, 0xc822},
{0xccad, 0x0014},
{0xccae, 0xc832},
{0xccaf, 0x1186},
{0xccb0, 0xc847},
{0xccb1, 0x1e02},
{0xccb2, 0xc013},
{0xccb3, 0xf341},
{0xccb4, 0xc01a},
{0xccb5, 0x0446},
{0xccb6, 0xc024},
{0xccb7, 0x1000},
{0xccb8, 0xc025},
{0xccb9, 0x0a00},
{0xccba, 0xc026},
{0xccbb, 0x0c0c},
{0xccbc, 0xc027},
{0xccbd, 0x0c0c},
{0xccbe, 0xc029},
{0xccbf, 0x00a0},
{0xccc0, 0xc030},
{0xccc1, 0x0a00},
{0xccc2, 0xc03c},
{0xccc3, 0x001c},
{0xccc4, 0xc005},
{0xccc5, 0x7a06},
{0xccc6, 0x0000},
{0xccc7, 0x0000},
{0xccc8, 0x628f},
{0xccc9, 0x26f2},
{0xccca, 0x3012},
{0xcccb, 0x1002},
{0xcccc, 0xc620},
{0xcccd, 0x0000},
{0xccce, 0xc621},
{0xcccf, 0x003f},
{0xccd0, 0xc622},
{0xccd1, 0x0000},
{0xccd2, 0xc623},
{0xccd3, 0x0000},
{0xccd4, 0xc624},
{0xccd5, 0x0000},
{0xccd6, 0xc625},
{0xccd7, 0x0000},
{0xccd8, 0xc627},
{0xccd9, 0x0000},
{0xccda, 0xc628},
{0xccdb, 0x0000},
{0xccdc, 0xc62c},
{0xccdd, 0x0000},
{0xccde, 0x0000},
{0xccdf, 0x0000},
{0xcce0, 0x628f},
{0xcce1, 0xd019},
{0xcce2, 0x26f2},
{0xcce3, 0x3012},
{0xcce4, 0x1002},
{0xcce5, 0xc210},
{0xcce6, 0x8000},
{0xcce7, 0xc210},
{0xcce8, 0x8010},
{0xcce9, 0xc210},
{0xccea, 0x8000},
{0xcceb, 0xc210},
{0xccec, 0x0000},
{0xcced, 0x0000},
{0xccee, 0x0000},
{0xccef, 0x8221},
{0xccf0, 0x2752},
{0xccf1, 0x3012},
{0xccf2, 0x1002},
{0xccf3, 0x6f72},
{0xccf4, 0x1002},
{0xccf5, 0x2806},
{0xccf6, 0x3006},
{0xccf7, 0x2007},
{0xccf8, 0x3cc7},
{0xccf9, 0xe161},
{0xccfa, 0xc171},
{0xccfb, 0x6134},
{0xccfc, 0x6135},
{0xccfd, 0x5453},
{0xccfe, 0x2858},
{0xccff, 0x3018},
{0xcd00, 0x1348},
{0xcd01, 0x6524},
{0xcd02, 0x27b8},
{0xcd03, 0x3018},
{0xcd04, 0x1008},
{0xcd05, 0x1002},
{0xcd06, 0x628f},
{0xcd07, 0x5dd3},
{0xcd08, 0x2906},
{0xcd09, 0x3016},
{0xcd0a, 0x1306},
{0xcd0b, 0x2ff7},
{0xcd0c, 0x30f7},
{0xcd0d, 0x60b7},
{0xcd0e, 0xdffd},
{0xcd0f, 0x0008},
{0xcd10, 0x6f72},
{0xcd11, 0x1002},
{0xcd12, 0x0000},
{0xcdff, 0x0a01},
/* end of code block */
/* Unpause the microcontroller to start program */
{0xca00, 0x0080},
{0xca12, 0x0000},
{0x0000, 0x000A}, /* wait 10ms just to be safe */
{0xffff, 0xffff} /* table terminator */
};
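/*
 * Hedged sketch of the download convention documented above: an entry
 * with Addr == 0 is a delay (Data gives the delay in milliseconds),
 * {0xffff, 0xffff} terminates the table, and anything else is written
 * to the PHY. sxg_write_mdio_reg() is a hypothetical MDIO write helper
 * here (the driver provides its own); mdelay() comes from <linux/delay.h>.
 */
static int sxg_download_phy_ucode(void *adapter)
{
	PPHY_UCODE entry;

	for (entry = PhyUcode; entry->Addr != 0xffff; entry++) {
		if (entry->Addr == 0) {
			mdelay(entry->Data);	/* delay, in milliseconds */
			continue;
		}
		if (sxg_write_mdio_reg(adapter, MIIM_DEV_PHY_PMA,
				       entry->Addr, entry->Data))
			return -EIO;		/* MDIO write failed */
	}
	return 0;
}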