Commit 1b981021 authored by Linus Torvalds
@@ -738,6 +738,7 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
short vtag;
#endif
int rx_pkt_limit = dev->quota;
unsigned long flags;
do{
/* process receive packets until we use the quota*/
@@ -841,18 +842,19 @@ static int amd8111e_rx_poll(struct net_device *dev, int * budget)
/* Receive descriptor is empty now */
dev->quota -= num_rx_pkt;
*budget -= num_rx_pkt;
spin_lock_irqsave(&lp->lock, flags);
netif_rx_complete(dev);
/* enable receive interrupt */
writel(VAL0|RINTEN0, mmio + INTEN0);
writel(VAL2 | RDMD0, mmio + CMD0);
spin_unlock_irqrestore(&lp->lock, flags);
return 0;
rx_not_empty:
/* Do not call a netif_rx_complete */
dev->quota -= num_rx_pkt;
*budget -= num_rx_pkt;
return 1;
}
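The interrupt handler below takes lp->lock as well, so the completion-and-re-enable block above is now serialized against it. A sketch of the interleaving the lock rules out (the CPU labels are illustrative, not part of the commit):

/*
 *   CPU0: amd8111e_rx_poll                 CPU1: amd8111e_interrupt
 *   ----------------------                 ------------------------
 *   netif_rx_complete(dev);
 *                                          intr0 = readl(mmio + INT0);
 *                                          ...sees RINT0 while poll looks done
 *   writel(VAL0|RINTEN0, mmio + INTEN0);
 *
 * With lp->lock held across the completion and the re-enable, the ISR
 * observes either "still polling" or "fully re-enabled", never the
 * half-finished state in between.
 */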
#else
@@ -1261,18 +1263,20 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
struct net_device * dev = (struct net_device *) dev_id;
struct amd8111e_priv *lp = netdev_priv(dev);
void __iomem *mmio = lp->mmio;
unsigned int intr0;
unsigned int intr0, intren0;
unsigned int handled = 1;
if(dev == NULL)
if(unlikely(dev == NULL))
return IRQ_NONE;
if (regs) spin_lock (&lp->lock);
spin_lock(&lp->lock);
/* disabling interrupt */
writel(INTREN, mmio + CMD0);
/* Read interrupt status */
intr0 = readl(mmio + INT0);
intren0 = readl(mmio + INTEN0);
/* Process all the INT event until INTR bit is clear. */
@@ -1293,11 +1297,11 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
/* Schedule a polling routine */
__netif_rx_schedule(dev);
}
else {
else if (intren0 & RINTEN0) {
printk("************Driver bug! \
interrupt while in poll\n");
/* Fix by disabling interrupts */
writel(RINT0, mmio + INT0);
/* Fix by disabling receive interrupts */
writel(RINTEN0, mmio + INTEN0);
}
}
#else
@@ -1321,7 +1325,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id, struct pt_regs *regs)
err_no_interrupt:
writel( VAL0 | INTREN,mmio + CMD0);
if (regs) spin_unlock(&lp->lock);
spin_unlock(&lp->lock);
return IRQ_RETVAL(handled);
}
@@ -155,9 +155,9 @@
#define DRV_NAME "e100"
#define DRV_EXT "-NAPI"
#define DRV_VERSION "3.3.6-k2"DRV_EXT
#define DRV_VERSION "3.4.8-k2"DRV_EXT
#define DRV_DESCRIPTION "Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT "Copyright(c) 1999-2004 Intel Corporation"
#define DRV_COPYRIGHT "Copyright(c) 1999-2005 Intel Corporation"
#define PFX DRV_NAME ": "
#define E100_WATCHDOG_PERIOD (2 * HZ)
@@ -210,11 +210,17 @@ static struct pci_device_id e100_id_table[] = {
INTEL_8255X_ETHERNET_DEVICE(0x1069, 6),
INTEL_8255X_ETHERNET_DEVICE(0x106A, 6),
INTEL_8255X_ETHERNET_DEVICE(0x106B, 6),
INTEL_8255X_ETHERNET_DEVICE(0x1091, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1092, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1093, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1094, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1095, 7),
INTEL_8255X_ETHERNET_DEVICE(0x1209, 0),
INTEL_8255X_ETHERNET_DEVICE(0x1229, 0),
INTEL_8255X_ETHERNET_DEVICE(0x2449, 2),
INTEL_8255X_ETHERNET_DEVICE(0x2459, 2),
INTEL_8255X_ETHERNET_DEVICE(0x245D, 2),
INTEL_8255X_ETHERNET_DEVICE(0x27DC, 7),
{ 0, }
};
MODULE_DEVICE_TABLE(pci, e100_id_table);
@@ -269,6 +275,12 @@ enum scb_status {
rus_mask = 0x3C,
};
enum ru_state {
RU_SUSPENDED = 0,
RU_RUNNING = 1,
RU_UNINITIALIZED = -1,
};
enum scb_stat_ack {
stat_ack_not_ours = 0x00,
stat_ack_sw_gen = 0x04,
@@ -510,7 +522,7 @@ struct nic {
struct rx *rx_to_use;
struct rx *rx_to_clean;
struct rfd blank_rfd;
int ru_running;
enum ru_state ru_running;
spinlock_t cb_lock ____cacheline_aligned;
spinlock_t cmd_lock;
@@ -539,6 +551,7 @@ struct nic {
struct timer_list watchdog;
struct timer_list blink_timer;
struct mii_if_info mii;
struct work_struct tx_timeout_task;
enum loopback loopback;
struct mem *mem;
@@ -770,7 +783,7 @@ static int e100_eeprom_save(struct nic *nic, u16 start, u16 count)
return 0;
}
#define E100_WAIT_SCB_TIMEOUT 40
#define E100_WAIT_SCB_TIMEOUT 20000 /* we might have to wait 100ms!!! */
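A sketch of the busy-wait that consumes a timeout like this (assumed shape; the real loop is inside e100_exec_cmd(), whose body falls outside this hunk, and the helper name here is hypothetical):

/* Hypothetical helper: poll the SCB command byte until the device
 * accepts the previous command. 20000 iterations with a 5us back-off
 * covers the ~100ms worst case noted in the comment above. */
static inline int e100_wait_scb_sketch(struct nic *nic)
{
	int i;
	for(i = 0; i < E100_WAIT_SCB_TIMEOUT; i++) {
		if(!readb(&nic->csr->scb.cmd_lo))
			return 0;	/* SCB free, command accepted */
		cpu_relax();
		udelay(5);
	}
	return -EAGAIN;		/* device never freed the SCB */
}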
static inline int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
{
unsigned long flags;
@@ -840,6 +853,10 @@ static inline int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
* because the controller is too busy, so
* let's just queue the command and try again
* when another command is scheduled. */
if(err == -ENOSPC) {
//request a reset
schedule_work(&nic->tx_timeout_task);
}
break;
} else {
nic->cuc_cmd = cuc_resume;
@@ -884,7 +901,7 @@ static void mdio_write(struct net_device *netdev, int addr, int reg, int data)
static void e100_get_defaults(struct nic *nic)
{
struct param_range rfds = { .min = 64, .max = 256, .count = 64 };
struct param_range rfds = { .min = 16, .max = 256, .count = 64 };
struct param_range cbs = { .min = 64, .max = 256, .count = 64 };
pci_read_config_byte(nic->pdev, PCI_REVISION_ID, &nic->rev_id);
@@ -899,8 +916,9 @@ static void e100_get_defaults(struct nic *nic)
/* Quadwords to DMA into FIFO before starting frame transmit */
nic->tx_threshold = 0xE0;
nic->tx_command = cpu_to_le16(cb_tx | cb_i | cb_tx_sf |
((nic->mac >= mac_82558_D101_A4) ? cb_cid : 0));
/* no interrupt for every tx completion, delay = 256us if not 557*/
nic->tx_command = cpu_to_le16(cb_tx | cb_tx_sf |
((nic->mac >= mac_82558_D101_A4) ? cb_cid : cb_i));
/* Template for a freshly allocated RFD */
nic->blank_rfd.command = cpu_to_le16(cb_el);
@@ -964,7 +982,8 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
if(nic->flags & multicast_all)
config->multicast_all = 0x1; /* 1=accept, 0=no */
if(!(nic->flags & wol_magic))
/* disable WoL when up */
if(netif_running(nic->netdev) || !(nic->flags & wol_magic))
config->magic_packet_disable = 0x1; /* 1=off, 0=on */
if(nic->mac >= mac_82558_D101_A4) {
@@ -1203,7 +1222,9 @@ static void e100_update_stats(struct nic *nic)
}
}
e100_exec_cmd(nic, cuc_dump_reset, 0);
if(e100_exec_cmd(nic, cuc_dump_reset, 0))
DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
static void e100_adjust_adaptive_ifs(struct nic *nic, int speed, int duplex)
@@ -1279,12 +1300,15 @@ static inline void e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = nic->tx_command;
/* interrupt every 16 packets regardless of delay */
if((nic->cbs_avail & ~15) == nic->cbs_avail) cb->command |= cb_i;
cb->u.tcb.tbd_array = cb->dma_addr + offsetof(struct cb, u.tcb.tbd);
cb->u.tcb.tcb_byte_count = 0;
cb->u.tcb.threshold = nic->tx_threshold;
cb->u.tcb.tbd_count = 1;
cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
skb->data, skb->len, PCI_DMA_TODEVICE));
// check for mapping failure?
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
}
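The cb_i test above works because masking with ~15 clears the low four bits; a brief illustration with hypothetical values:

/* (cbs_avail & ~15) == cbs_avail holds exactly when the low four bits
 * of cbs_avail are zero, i.e. cbs_avail is a multiple of 16:
 *   cbs_avail = 48:  48 & ~15 = 48  -> request an interrupt
 *   cbs_avail = 49:  49 & ~15 = 48  -> skip
 * giving roughly one Tx-complete interrupt per 16 queued frames. */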
@@ -1297,7 +1321,8 @@ static int e100_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
/* SW workaround for ICH[x] 10Mbps/half duplex Tx hang.
Issue a NOP command followed by a 1us delay before
issuing the Tx command. */
e100_exec_cmd(nic, cuc_nop, 0);
if(e100_exec_cmd(nic, cuc_nop, 0))
DPRINTK(TX_ERR, DEBUG, "exec cuc_nop failed\n");
udelay(1);
}
@@ -1415,12 +1440,18 @@ static int e100_alloc_cbs(struct nic *nic)
return 0;
}
static inline void e100_start_receiver(struct nic *nic)
static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
{
if(!nic->rxs) return;
if(RU_SUSPENDED != nic->ru_running) return;
/* handle init time starts */
if(!rx) rx = nic->rxs;
/* (Re)start RU if suspended or idle and RFA is non-NULL */
if(!nic->ru_running && nic->rx_to_clean->skb) {
e100_exec_cmd(nic, ruc_start, nic->rx_to_clean->dma_addr);
nic->ru_running = 1;
if(rx->skb) {
e100_exec_cmd(nic, ruc_start, rx->dma_addr);
nic->ru_running = RU_RUNNING;
}
}
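Read together with the enum ru_state added earlier, the ru_running updates in this patch form a small state machine. A summary derived from the hunks in this commit (this table is not a comment in the source):

/*
 *  RU_UNINITIALIZED -> RU_SUSPENDED   rx list allocated (e100_rx_alloc_list)
 *  RU_SUSPENDED     -> RU_RUNNING     ruc_start issued (e100_start_receiver)
 *  RU_RUNNING       -> RU_SUSPENDED   RNR interrupt seen, or an RFD with
 *                                     the cb_el bit back in e100_rx_indicate
 */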
@@ -1437,6 +1468,13 @@ static inline int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
if(pci_dma_mapping_error(rx->dma_addr)) {
dev_kfree_skb_any(rx->skb);
rx->skb = 0;
rx->dma_addr = 0;
return -ENOMEM;
}
/* Link the RFD to end of RFA by linking previous RFD to
* this one, and clearing EL bit of previous. */
if(rx->prev->skb) {
@@ -1471,7 +1509,7 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
/* If data isn't ready, nothing to indicate */
if(unlikely(!(rfd_status & cb_complete)))
return -EAGAIN;
return -ENODATA;
/* Get actual data size */
actual_size = le16_to_cpu(rfd->actual_size) & 0x3FFF;
@@ -1482,6 +1520,10 @@ static inline int e100_rx_indicate(struct nic *nic, struct rx *rx,
pci_unmap_single(nic->pdev, rx->dma_addr,
RFD_BUF_LEN, PCI_DMA_FROMDEVICE);
/* this allows for a fast restart without re-enabling interrupts */
if(le16_to_cpu(rfd->command) & cb_el)
nic->ru_running = RU_SUSPENDED;
/* Pull off the RFD and put the actual data (minus eth hdr) */
skb_reserve(skb, sizeof(struct rfd));
skb_put(skb, actual_size);
@@ -1514,20 +1556,45 @@ static inline void e100_rx_clean(struct nic *nic, unsigned int *work_done,
unsigned int work_to_do)
{
struct rx *rx;
int restart_required = 0;
struct rx *rx_to_start = NULL;
/* are we already rnr? then pay attention!!! this ensures that
* the state machine progression never allows a start with a
* partially cleaned list, avoiding a race between hardware
* and rx_to_clean when in NAPI mode */
if(RU_SUSPENDED == nic->ru_running)
restart_required = 1;
/* Indicate newly arrived packets */
for(rx = nic->rx_to_clean; rx->skb; rx = nic->rx_to_clean = rx->next) {
if(e100_rx_indicate(nic, rx, work_done, work_to_do))
int err = e100_rx_indicate(nic, rx, work_done, work_to_do);
if(-EAGAIN == err) {
/* hit quota so have more work to do, restart once
* cleanup is complete */
restart_required = 0;
break;
} else if(-ENODATA == err)
break; /* No more to clean */
}
/* save our starting point as the place we'll restart the receiver */
if(restart_required)
rx_to_start = nic->rx_to_clean;
/* Alloc new skbs to refill list */
for(rx = nic->rx_to_use; !rx->skb; rx = nic->rx_to_use = rx->next) {
if(unlikely(e100_rx_alloc_skb(nic, rx)))
break; /* Better luck next time (see watchdog) */
}
e100_start_receiver(nic);
if(restart_required) {
// ack the rnr?
writeb(stat_ack_rnr, &nic->csr->scb.stat_ack);
e100_start_receiver(nic, rx_to_start);
if(work_done)
(*work_done)++;
}
}
static void e100_rx_clean_list(struct nic *nic)
@@ -1535,6 +1602,8 @@ static void e100_rx_clean_list(struct nic *nic)
struct rx *rx;
unsigned int i, count = nic->params.rfds.count;
nic->ru_running = RU_UNINITIALIZED;
if(nic->rxs) {
for(rx = nic->rxs, i = 0; i < count; rx++, i++) {
if(rx->skb) {
@@ -1548,7 +1617,6 @@ static void e100_rx_clean_list(struct nic *nic)
}
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = 0;
}
static int e100_rx_alloc_list(struct nic *nic)
@@ -1557,6 +1625,7 @@ static int e100_rx_alloc_list(struct nic *nic)
unsigned int i, count = nic->params.rfds.count;
nic->rx_to_use = nic->rx_to_clean = NULL;
nic->ru_running = RU_UNINITIALIZED;
if(!(nic->rxs = kmalloc(sizeof(struct rx) * count, GFP_ATOMIC)))
return -ENOMEM;
@@ -1572,6 +1641,7 @@ static int e100_rx_alloc_list(struct nic *nic)
}
nic->rx_to_use = nic->rx_to_clean = nic->rxs;
nic->ru_running = RU_SUSPENDED;
return 0;
}
@@ -1593,7 +1663,7 @@ static irqreturn_t e100_intr(int irq, void *dev_id, struct pt_regs *regs)
/* We hit Receive No Resource (RNR); restart RU after cleaning */
if(stat_ack & stat_ack_rnr)
nic->ru_running = 0;
nic->ru_running = RU_SUSPENDED;
e100_disable_irq(nic);
netif_rx_schedule(netdev);
@@ -1663,6 +1733,7 @@ static int e100_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
#ifdef CONFIG_PM
static int e100_asf(struct nic *nic)
{
/* ASF can be enabled from eeprom */
@@ -1671,6 +1742,7 @@
!(nic->eeprom[eeprom_config_asf] & eeprom_gcl) &&
((nic->eeprom[eeprom_smbus_addr] & 0xFF) != 0xFE));
}
#endif
static int e100_up(struct nic *nic)
{
@@ -1683,13 +1755,16 @@ static int e100_up(struct nic *nic)
if((err = e100_hw_init(nic)))
goto err_clean_cbs;
e100_set_multicast_list(nic->netdev);
e100_start_receiver(nic);
e100_start_receiver(nic, 0);
mod_timer(&nic->watchdog, jiffies);
if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
nic->netdev->name, nic->netdev)))
goto err_no_irq;
e100_enable_irq(nic);
netif_wake_queue(nic->netdev);
netif_poll_enable(nic->netdev);
/* enable ints _after_ enabling poll, preventing a race between
* disable ints+schedule */
e100_enable_irq(nic);
return 0;
err_no_irq:
@@ -1703,11 +1778,13 @@ static int e100_up(struct nic *nic)
static void e100_down(struct nic *nic)
{
/* wait here for poll to complete */
netif_poll_disable(nic->netdev);
netif_stop_queue(nic->netdev);
e100_hw_reset(nic);
free_irq(nic->pdev->irq, nic->netdev);
del_timer_sync(&nic->watchdog);
netif_carrier_off(nic->netdev);
netif_stop_queue(nic->netdev);
e100_clean_cbs(nic);
e100_rx_clean_list(nic);
}
@@ -1716,6 +1793,15 @@ static void e100_tx_timeout(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
/* Reset outside of interrupt context, to avoid request_irq
* in interrupt context */
schedule_work(&nic->tx_timeout_task);
}
static void e100_tx_timeout_task(struct net_device *netdev)
{
struct nic *nic = netdev_priv(netdev);
DPRINTK(TX_ERR, DEBUG, "scb.status=0x%02X\n",
readb(&nic->csr->scb.status));
e100_down(netdev_priv(netdev));
@@ -1749,7 +1835,7 @@ static int e100_loopback_test(struct nic *nic, enum loopback loopback_mode)
mdio_write(nic->netdev, nic->mii.phy_id, MII_BMCR,
BMCR_LOOPBACK);
e100_start_receiver(nic);
e100_start_receiver(nic, 0);
if(!(skb = dev_alloc_skb(ETH_DATA_LEN))) {
err = -ENOMEM;
@@ -1869,7 +1955,6 @@ static int e100_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
else
nic->flags &= ~wol_magic;
pci_enable_wake(nic->pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
e100_exec_cb(nic, NULL, e100_configure);
return 0;
@@ -2223,6 +2308,7 @@ static int __devinit e100_probe(struct pci_dev *pdev,
e100_get_defaults(nic);
/* locks must be initialized before calling hw_reset */
spin_lock_init(&nic->cb_lock);
spin_lock_init(&nic->cmd_lock);
@@ -2240,6 +2326,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
nic->blink_timer.function = e100_blink_led;
nic->blink_timer.data = (unsigned long)nic;
INIT_WORK(&nic->tx_timeout_task,
(void (*)(void *))e100_tx_timeout_task, netdev);
if((err = e100_alloc(nic))) {
DPRINTK(PROBE, ERR, "Cannot alloc driver memory, aborting.\n");
goto err_out_iounmap;
@@ -2263,7 +2352,8 @@ static int __devinit e100_probe(struct pci_dev *pdev,
(nic->eeprom[eeprom_id] & eeprom_id_wol))
nic->flags |= wol_magic;
pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
/* ack any pending wake events, disable PME */
pci_enable_wake(pdev, 0, 0);
strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev))) {
@@ -2335,7 +2425,10 @@ static int e100_resume(struct pci_dev *pdev)
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
e100_hw_init(nic);
/* ack any pending wake events, disable PME */
pci_enable_wake(pdev, 0, 0);
if(e100_hw_init(nic))
DPRINTK(HW, ERR, "e100_hw_init failed\n");
netif_device_attach(netdev);
if(netif_running(netdev))
@@ -2345,6 +2438,21 @@ static int e100_resume(struct pci_dev *pdev)
}
#endif
static void e100_shutdown(struct device *dev)
{
struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct nic *nic = netdev_priv(netdev);
#ifdef CONFIG_PM
pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
#else
pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
#endif
}
static struct pci_driver e100_driver = {
.name = DRV_NAME,
.id_table = e100_id_table,
@@ -2354,6 +2462,11 @@ static struct pci_driver e100_driver = {
.suspend = e100_suspend,
.resume = e100_resume,
#endif
.driver = {
.shutdown = e100_shutdown,
}
};
static int __init e100_init_module(void)
/*******************************************************************************
Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -112,6 +112,8 @@ struct e1000_adapter;
#define E1000_MAX_82544_RXD 4096
/* Supported Rx Buffer Sizes */
#define E1000_RXBUFFER_128 128 /* Used for packet split */
#define E1000_RXBUFFER_256 256 /* Used for packet split */
#define E1000_RXBUFFER_2048 2048
#define E1000_RXBUFFER_4096 4096
#define E1000_RXBUFFER_8192 8192
@@ -137,15 +139,19 @@ struct e1000_adapter;
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define E1000_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0004
#define E1000_EEPROM_APME 0x0400
#define AUTO_ALL_MODES 0
#define E1000_EEPROM_82544_APM 0x0400
#define E1000_EEPROM_APME 0x0400
#ifndef E1000_MASTER_SLAVE
/* Switch to override PHY master/slave setting */
#define E1000_MASTER_SLAVE e1000_ms_hw_default
#endif
#define E1000_MNG_VLAN_NONE -1
/* Number of packet split data buffers (not including the header buffer) */
#define PS_PAGE_BUFFERS MAX_PS_BUFFERS-1
/* only works for sizes that are powers of 2 */
#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
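A quick worked example of the round-up mask trick (values illustrative):

/* E1000_ROUNDUP(i, 16) with i = 100:
 *   (100 + 16 - 1) & ~(16 - 1)  =  115 & ~15  =  112
 * 112 is the smallest multiple of 16 >= 100. For a non-power-of-2
 * size, ~(size - 1) is not a contiguous bit mask and the identity
 * breaks, hence the power-of-2 restriction above. */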
@@ -159,6 +165,9 @@ struct e1000_buffer {
uint16_t next_to_watch;
};
struct e1000_ps_page { struct page *ps_page[MAX_PS_BUFFERS]; };
struct e1000_ps_page_dma { uint64_t ps_page_dma[MAX_PS_BUFFERS]; };
struct e1000_desc_ring {
/* pointer to the descriptor ring memory */
void *desc;
@@ -174,12 +183,19 @@ struct e1000_desc_ring {
unsigned int next_to_clean;
/* array of buffer information structs */
struct e1000_buffer *buffer_info;
/* arrays of page information for packet split */
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
};
#define E1000_DESC_UNUSED(R) \
((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
(R)->next_to_clean - (R)->next_to_use - 1)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
#define E1000_RX_DESC_EXT(R, i) \
(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
#define E1000_GET_DESC(R, i, type) (&(((struct type *)((R).desc))[i]))
#define E1000_RX_DESC(R, i) E1000_GET_DESC(R, i, e1000_rx_desc)
#define E1000_TX_DESC(R, i) E1000_GET_DESC(R, i, e1000_tx_desc)
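To make the ring arithmetic concrete, a worked evaluation of E1000_DESC_UNUSED (values illustrative):

/* count = 256, next_to_clean = 10, next_to_use = 200:
 *   next_to_clean > next_to_use is false, so count is added in:
 *   256 + 10 - 200 - 1 = 65 descriptors unused
 * The trailing -1 keeps one slot permanently empty so that a full
 * ring is distinguishable from an empty one. */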
@@ -192,6 +208,7 @@ struct e1000_adapter {
struct timer_list watchdog_timer;
struct timer_list phy_info_timer;
struct vlan_group *vlgrp;
uint16_t mng_vlan_id;
uint32_t bd_number;
uint32_t rx_buffer_len;
uint32_t part_num;
@@ -228,14 +245,23 @@ struct e1000_adapter {
boolean_t detect_tx_hung;
/* RX */
#ifdef CONFIG_E1000_NAPI
boolean_t (*clean_rx) (struct e1000_adapter *adapter, int *work_done,
int work_to_do);
#else
boolean_t (*clean_rx) (struct e1000_adapter *adapter);
#endif
void (*alloc_rx_buf) (struct e1000_adapter *adapter);
struct e1000_desc_ring rx_ring;
uint64_t hw_csum_err;
uint64_t hw_csum_good;
uint32_t rx_int_delay;
uint32_t rx_abs_int_delay;
boolean_t rx_csum;
boolean_t rx_ps;
uint32_t gorcl;
uint64_t gorcl_old;
uint16_t rx_ps_bsize0;
/* Interrupt Throttle Rate */
uint32_t itr;
@@ -257,5 +283,8 @@ struct e1000_adapter {
int msg_enable;
#ifdef CONFIG_PCI_MSI
boolean_t have_msi;
#endif
};
#endif /* _E1000_H_ */
/*******************************************************************************
Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -69,6 +69,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
{ "rx_crc_errors", E1000_STAT(net_stats.rx_crc_errors) },
{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
{ "rx_fifo_errors", E1000_STAT(net_stats.rx_fifo_errors) },
{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
{ "rx_missed_errors", E1000_STAT(net_stats.rx_missed_errors) },
{ "tx_aborted_errors", E1000_STAT(net_stats.tx_aborted_errors) },
{ "tx_carrier_errors", E1000_STAT(net_stats.tx_carrier_errors) },
@@ -593,7 +594,7 @@ e1000_set_ringparam(struct net_device *netdev,
tx_old = adapter->tx_ring;
rx_old = adapter->rx_ring;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
if((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
if(netif_running(adapter->netdev))
@@ -784,8 +785,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
/* Hook up test interrupt handler just for this test */
if(!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
shared_int = FALSE;
} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
netdev->name, netdev)){
} else if(request_irq(irq, &e1000_test_intr, SA_SHIRQ,
netdev->name, netdev)){
*data = 1;
return -1;
}
@@ -842,10 +843,8 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
* test failed.
*/
adapter->test_icr = 0;
E1000_WRITE_REG(&adapter->hw, IMC,
(~mask & 0x00007FFF));
E1000_WRITE_REG(&adapter->hw, ICS,
(~mask & 0x00007FFF));
E1000_WRITE_REG(&adapter->hw, IMC, ~mask & 0x00007FFF);
E1000_WRITE_REG(&adapter->hw, ICS, ~mask & 0x00007FFF);
msec_delay(10);
if(adapter->test_icr) {
@@ -919,7 +918,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
/* Setup Tx descriptor ring and Tx buffers */
txdr->count = 80;
if(!txdr->count)
txdr->count = E1000_DEFAULT_TXD;
size = txdr->count * sizeof(struct e1000_buffer);
if(!(txdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@@ -974,7 +974,8 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
/* Setup Rx descriptor ring and Rx buffers */
rxdr->count = 80;
if(!rxdr->count)
rxdr->count = E1000_DEFAULT_RXD;
size = rxdr->count * sizeof(struct e1000_buffer);
if(!(rxdr->buffer_info = kmalloc(size, GFP_KERNEL))) {
@@ -1008,7 +1009,7 @@ e1000_setup_desc_rings(struct e1000_adapter *adapter)
struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rxdr, i);
struct sk_buff *skb;
if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
if(!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
GFP_KERNEL))) {
ret_val = 6;
goto err_nomem;
@@ -1310,31 +1311,62 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
struct e1000_desc_ring *txdr = &adapter->test_tx_ring;
struct e1000_desc_ring *rxdr = &adapter->test_rx_ring;
struct pci_dev *pdev = adapter->pdev;
int i, ret_val;
int i, j, k, l, lc, good_cnt, ret_val=0;
unsigned long time;
E1000_WRITE_REG(&adapter->hw, RDT, rxdr->count - 1);
for(i = 0; i < 64; i++) {
e1000_create_lbtest_frame(txdr->buffer_info[i].skb, 1024);
pci_dma_sync_single_for_device(pdev, txdr->buffer_info[i].dma,
txdr->buffer_info[i].length,
PCI_DMA_TODEVICE);
}
E1000_WRITE_REG(&adapter->hw, TDT, i);
msec_delay(200);
i = 0;
do {
pci_dma_sync_single_for_cpu(pdev, rxdr->buffer_info[i].dma,
rxdr->buffer_info[i].length,
PCI_DMA_FROMDEVICE);
ret_val = e1000_check_lbtest_frame(rxdr->buffer_info[i].skb,
1024);
i++;
} while (ret_val != 0 && i < 64);
/* Calculate the loop count based on the largest descriptor ring
* The idea is to wrap the largest ring a number of times using 64
* send/receive pairs during each loop
*/
if(rxdr->count <= txdr->count)
lc = ((txdr->count / 64) * 2) + 1;
else
lc = ((rxdr->count / 64) * 2) + 1;
k = l = 0;
for(j = 0; j <= lc; j++) { /* loop count loop */
for(i = 0; i < 64; i++) { /* send the packets */
e1000_create_lbtest_frame(txdr->buffer_info[i].skb,
1024);
pci_dma_sync_single_for_device(pdev,
txdr->buffer_info[k].dma,
txdr->buffer_info[k].length,
PCI_DMA_TODEVICE);
if(unlikely(++k == txdr->count)) k = 0;
}
E1000_WRITE_REG(&adapter->hw, TDT, k);
msec_delay(200);
time = jiffies; /* set the start time for the receive */
good_cnt = 0;
do { /* receive the sent packets */
pci_dma_sync_single_for_cpu(pdev,
rxdr->buffer_info[l].dma,
rxdr->buffer_info[l].length,
PCI_DMA_FROMDEVICE);
ret_val = e1000_check_lbtest_frame(
rxdr->buffer_info[l].skb,
1024);
if(!ret_val)
good_cnt++;
if(unlikely(++l == rxdr->count)) l = 0;
/* time + 20 msecs (200 msecs on 2.4) is more than
* enough time to complete the receives, if it's
* exceeded, break and error off
*/
} while (good_cnt < 64 && jiffies < (time + 20));
if(good_cnt != 64) {
ret_val = 13; /* ret_val is the same as mis-compare */
break;
}
if(jiffies >= (time + 2)) {
ret_val = 14; /* error code for time out error */
break;
}
} /* end loop count loop */
return ret_val;
}
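For concreteness, the loop-count formula above evaluated with illustrative ring sizes:

/* txdr->count = 256, rxdr->count = 80 (so rxdr->count <= txdr->count):
 *   lc = ((256 / 64) * 2) + 1 = 9
 * nine bursts of 64 send/receive pairs, enough to wrap the larger
 * ring several times in each direction. */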
@@ -1354,13 +1386,12 @@ static int
e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
{
*data = 0;
if (adapter->hw.media_type == e1000_media_type_internal_serdes) {
int i = 0;
adapter->hw.serdes_link_down = TRUE;
/* on some blade server designs link establishment */
/* could take as long as 2-3 minutes. */
/* On some blade server designs, link establishment
* could take as long as 2-3 minutes */
do {
e1000_check_for_link(&adapter->hw);
if (adapter->hw.serdes_link_down == FALSE)
@@ -1368,9 +1399,11 @@ e1000_link_test(struct e1000_adapter *adapter, uint64_t *data)
msec_delay(20);
} while (i++ < 3750);
*data = 1;
*data = 1;
} else {
e1000_check_for_link(&adapter->hw);
if(adapter->hw.autoneg) /* if auto_neg is set wait for it */
msec_delay(4000);
if(!(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU)) {
*data = 1;
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
/*******************************************************************************
Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -42,7 +42,12 @@
#include <linux/sched.h>
#ifndef msec_delay
#define msec_delay(x) msleep(x)
#define msec_delay(x) do { if(in_interrupt()) { \
/* Don't mdelay in interrupt context! */ \
BUG(); \
} else { \
msleep(x); \
} } while(0)
/* Some workarounds require millisecond delays and are run during interrupt
* context. Most notably, when establishing link, the phy may need tweaking
@@ -96,6 +101,29 @@ typedef enum {
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 2)))
#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
writew((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 1))))
#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
readw((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
((offset) << 1)))
#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
writeb((value), ((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset))))
#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
readb((a)->hw_addr + \
(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
(offset)))
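Note how the offset scaling in these accessors tracks the access width; a summary (not a comment in the source):

/* Dword array accessors shift the offset by 2 (x4 bytes), word
 * accessors by 1 (x2), and byte accessors not at all, so 'offset'
 * is always an element index rather than a byte offset. */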
#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
#endif /* _E1000_OSDEP_H_ */
/*******************************************************************************
Copyright(c) 1999 - 2004 Intel Corporation. All rights reserved.
Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
@@ -478,7 +478,6 @@ e1000_check_options(struct e1000_adapter *adapter)
DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
opt.name);
break;
case -1:
default:
e1000_validate_option(&adapter->itr, &opt,
adapter);
@@ -110,7 +110,7 @@ struct ixgb_adapter;
#define IXGB_TX_QUEUE_WAKE 16
/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGB_RX_BUFFER_WRITE 16 /* Must be power of 2 */
#define IXGB_RX_BUFFER_WRITE 4 /* Must be power of 2 */
/* only works for sizes that are powers of 2 */
#define IXGB_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
@@ -411,7 +411,7 @@ ixgb_write_eeprom(struct ixgb_hw *hw, uint16_t offset, uint16_t data)
ixgb_cleanup_eeprom(hw);
/* clear the init_ctrl_reg_1 to signify that the cache is invalidated */
ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
return;
}
@@ -483,7 +483,7 @@ ixgb_get_eeprom_data(struct ixgb_hw *hw)
DEBUGOUT("ixgb_ee: Checksum invalid.\n");
/* clear the init_ctrl_reg_1 to signify that the cache is
* invalidated */
ee_map->init_ctrl_reg_1 = EEPROM_ICW1_SIGNATURE_CLEAR;
ee_map->init_ctrl_reg_1 = le16_to_cpu(EEPROM_ICW1_SIGNATURE_CLEAR);
return (FALSE);
}
@@ -579,7 +579,7 @@ ixgb_get_ee_compatibility(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->compatibility);
return (le16_to_cpu(ee_map->compatibility));
return(0);
}
@@ -616,7 +616,7 @@ ixgb_get_ee_init_ctrl_reg_1(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->init_ctrl_reg_1);
return (le16_to_cpu(ee_map->init_ctrl_reg_1));
return(0);
}
@@ -635,7 +635,7 @@ ixgb_get_ee_init_ctrl_reg_2(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->init_ctrl_reg_2);
return (le16_to_cpu(ee_map->init_ctrl_reg_2));
return(0);
}
@@ -654,7 +654,7 @@ ixgb_get_ee_subsystem_id(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->subsystem_id);
return (le16_to_cpu(ee_map->subsystem_id));
return(0);
}
@@ -673,7 +673,7 @@ ixgb_get_ee_subvendor_id(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->subvendor_id);
return (le16_to_cpu(ee_map->subvendor_id));
return(0);
}
@@ -692,7 +692,7 @@ ixgb_get_ee_device_id(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->device_id);
return (le16_to_cpu(ee_map->device_id));
return(0);
}
@@ -711,7 +711,7 @@ ixgb_get_ee_vendor_id(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->vendor_id);
return (le16_to_cpu(ee_map->vendor_id));
return(0);
}
@@ -730,7 +730,7 @@ ixgb_get_ee_swdpins_reg(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->swdpins_reg);
return (le16_to_cpu(ee_map->swdpins_reg));
return(0);
}
@@ -749,7 +749,7 @@ ixgb_get_ee_d3_power(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->d3_power);
return (le16_to_cpu(ee_map->d3_power));
return(0);
}
@@ -768,7 +768,7 @@ ixgb_get_ee_d0_power(struct ixgb_hw *hw)
struct ixgb_ee_map_type *ee_map = (struct ixgb_ee_map_type *)hw->eeprom;
if(ixgb_check_and_get_eeprom_data(hw) == TRUE)
return(ee_map->d0_power);
return (le16_to_cpu(ee_map->d0_power));
return(0);
}
@@ -252,7 +252,9 @@ ixgb_get_regs(struct net_device *netdev,
uint32_t *reg_start = reg;
uint8_t i;
regs->version = (adapter->hw.device_id << 16) | adapter->hw.subsystem_id;
/* the 1 (one) below indicates an attempt at versioning; if the
* interface in ethtool or the driver changes, this 1 should be incremented */
regs->version = (1<<24) | hw->revision_id << 16 | hw->device_id;
/* General Registers */
*reg++ = IXGB_READ_REG(hw, CTRL0); /* 0 */
@@ -47,7 +47,7 @@ char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
char ixgb_driver_version[] = "1.0.90-k2"DRIVERNAPI;
char ixgb_driver_version[] = "1.0.95-k2"DRIVERNAPI;
char ixgb_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";
/* ixgb_pci_tbl - PCI Device ID Table
@@ -103,6 +103,7 @@ static int ixgb_change_mtu(struct net_device *netdev, int new_mtu);
static int ixgb_set_mac(struct net_device *netdev, void *p);
static irqreturn_t ixgb_intr(int irq, void *data, struct pt_regs *regs);
static boolean_t ixgb_clean_tx_irq(struct ixgb_adapter *adapter);
#ifdef CONFIG_IXGB_NAPI
static int ixgb_clean(struct net_device *netdev, int *budget);
static boolean_t ixgb_clean_rx_irq(struct ixgb_adapter *adapter,
@@ -120,33 +121,20 @@ static void ixgb_vlan_rx_add_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid);
static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
static int ixgb_notify_reboot(struct notifier_block *, unsigned long event,
void *ptr);
static int ixgb_suspend(struct pci_dev *pdev, uint32_t state);
#ifdef CONFIG_NET_POLL_CONTROLLER
/* for netdump / net console */
static void ixgb_netpoll(struct net_device *dev);
#endif
struct notifier_block ixgb_notifier_reboot = {
.notifier_call = ixgb_notify_reboot,
.next = NULL,
.priority = 0
};
/* Exported from other modules */
extern void ixgb_check_options(struct ixgb_adapter *adapter);
static struct pci_driver ixgb_driver = {
.name = ixgb_driver_name,
.name = ixgb_driver_name,
.id_table = ixgb_pci_tbl,
.probe = ixgb_probe,
.remove = __devexit_p(ixgb_remove),
/* Power Managment Hooks */
.suspend = NULL,
.resume = NULL
.probe = ixgb_probe,
.remove = __devexit_p(ixgb_remove),
};
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
@@ -169,17 +157,12 @@ MODULE_LICENSE("GPL");
static int __init
ixgb_init_module(void)
{
int ret;
printk(KERN_INFO "%s - version %s\n",
ixgb_driver_string, ixgb_driver_version);
printk(KERN_INFO "%s\n", ixgb_copyright);
ret = pci_module_init(&ixgb_driver);
if(ret >= 0) {
register_reboot_notifier(&ixgb_notifier_reboot);
}
return ret;
return pci_module_init(&ixgb_driver);
}
module_init(ixgb_init_module);
@@ -194,7 +177,6 @@ module_init(ixgb_init_module);
static void __exit
ixgb_exit_module(void)
{
unregister_reboot_notifier(&ixgb_notifier_reboot);
pci_unregister_driver(&ixgb_driver);
}
@@ -224,8 +206,8 @@ ixgb_irq_enable(struct ixgb_adapter *adapter)
{
if(atomic_dec_and_test(&adapter->irq_sem)) {
IXGB_WRITE_REG(&adapter->hw, IMS,
IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
IXGB_INT_RXO | IXGB_INT_LSC);
IXGB_INT_RXT0 | IXGB_INT_RXDMT0 | IXGB_INT_TXDW |
IXGB_INT_LSC);
IXGB_WRITE_FLUSH(&adapter->hw);
}
}
@@ -1209,10 +1191,10 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
| IXGB_CONTEXT_DESC_CMD_TSE
| IXGB_CONTEXT_DESC_CMD_IP
| IXGB_CONTEXT_DESC_CMD_TCP
| IXGB_CONTEXT_DESC_CMD_RS
| IXGB_CONTEXT_DESC_CMD_IDE
| (skb->len - (hdr_len)));
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
@@ -1247,8 +1229,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
context_desc->mss = 0;
context_desc->cmd_type_len =
cpu_to_le32(IXGB_CONTEXT_DESC_TYPE
| IXGB_TX_DESC_CMD_RS
| IXGB_TX_DESC_CMD_IDE);
| IXGB_TX_DESC_CMD_IDE);
if(++i == adapter->tx_ring.count) i = 0;
adapter->tx_ring.next_to_use = i;
@@ -1273,6 +1254,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
len -= skb->data_len;
i = tx_ring->next_to_use;
@@ -1526,14 +1508,33 @@ ixgb_change_mtu(struct net_device *netdev, int new_mtu)
void
ixgb_update_stats(struct ixgb_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
(netdev->mc_count > IXGB_MAX_NUM_MULTICAST_ADDRESSES)) {
u64 multi = IXGB_READ_REG(&adapter->hw, MPRCL);
u32 bcast_l = IXGB_READ_REG(&adapter->hw, BPRCL);
u32 bcast_h = IXGB_READ_REG(&adapter->hw, BPRCH);
u64 bcast = ((u64)bcast_h << 32) | bcast_l;
multi |= ((u64)IXGB_READ_REG(&adapter->hw, MPRCH) << 32);
/* fix up multicast stats by removing broadcasts */
multi -= bcast;
adapter->stats.mprcl += (multi & 0xFFFFFFFF);
adapter->stats.mprch += (multi >> 32);
adapter->stats.bprcl += bcast_l;
adapter->stats.bprch += bcast_h;
} else {
adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
}
adapter->stats.tprl += IXGB_READ_REG(&adapter->hw, TPRL);
adapter->stats.tprh += IXGB_READ_REG(&adapter->hw, TPRH);
adapter->stats.gprcl += IXGB_READ_REG(&adapter->hw, GPRCL);
adapter->stats.gprch += IXGB_READ_REG(&adapter->hw, GPRCH);
adapter->stats.bprcl += IXGB_READ_REG(&adapter->hw, BPRCL);
adapter->stats.bprch += IXGB_READ_REG(&adapter->hw, BPRCH);
adapter->stats.mprcl += IXGB_READ_REG(&adapter->hw, MPRCL);
adapter->stats.mprch += IXGB_READ_REG(&adapter->hw, MPRCH);
adapter->stats.uprcl += IXGB_READ_REG(&adapter->hw, UPRCL);
adapter->stats.uprch += IXGB_READ_REG(&adapter->hw, UPRCH);
adapter->stats.vprcl += IXGB_READ_REG(&adapter->hw, VPRCL);
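The promiscuous-mode branch above assembles 64-bit packet counts from 32-bit register halves before subtracting broadcasts; a sketch of the arithmetic with hypothetical register values:

/* MPRCL = 0x10, MPRCH = 0x1, BPRCL = 0x4, BPRCH = 0x0:
 *   multi = ((u64)0x1 << 32) | 0x10  = 0x100000010
 *   bcast = ((u64)0x0 << 32) | 0x4   = 0x4
 *   multi -= bcast;                    -> 0x10000000C
 * so broadcasts, which the hardware also folds into MPRC while the
 * interface is promiscuous, are not double-counted as multicast. */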
@@ -1823,7 +1824,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
struct pci_dev *pdev = adapter->pdev;
struct ixgb_rx_desc *rx_desc, *next_rxd;
struct ixgb_buffer *buffer_info, *next_buffer, *next2_buffer;
struct sk_buff *skb, *next_skb;
uint32_t length;
unsigned int i, j;
boolean_t cleaned = FALSE;
@@ -1833,6 +1833,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
buffer_info = &rx_ring->buffer_info[i];
while(rx_desc->status & IXGB_RX_DESC_STATUS_DD) {
struct sk_buff *skb, *next_skb;
u8 status;
#ifdef CONFIG_IXGB_NAPI
if(*work_done >= work_to_do)
@@ -1840,7 +1842,9 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
(*work_done)++;
#endif
status = rx_desc->status;
skb = buffer_info->skb;
prefetch(skb->data);
if(++i == rx_ring->count) i = 0;
@@ -1855,7 +1859,6 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
next_skb = next_buffer->skb;
prefetch(next_skb);
cleaned = TRUE;
pci_unmap_single(pdev,
@@ -1865,7 +1868,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
length = le16_to_cpu(rx_desc->length);
if(unlikely(!(rx_desc->status & IXGB_RX_DESC_STATUS_EOP))) {
if(unlikely(!(status & IXGB_RX_DESC_STATUS_EOP))) {
/* All receives must fit into a single buffer */
@@ -1873,12 +1876,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
"length<%x>\n", length);
dev_kfree_skb_irq(skb);
rx_desc->status = 0;
buffer_info->skb = NULL;
rx_desc = next_rxd;
buffer_info = next_buffer;
continue;
goto rxdesc_done;
}
if (unlikely(rx_desc->errors
@@ -1887,12 +1885,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
IXGB_RX_DESC_ERRORS_RXE))) {
dev_kfree_skb_irq(skb);
rx_desc->status = 0;
buffer_info->skb = NULL;
rx_desc = next_rxd;
buffer_info = next_buffer;
continue;
goto rxdesc_done;
}
/* Good Receive */
@@ -1903,7 +1896,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_IXGB_NAPI
if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) &
IXGB_RX_DESC_SPECIAL_VLAN_MASK);
@@ -1911,7 +1904,7 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
netif_receive_skb(skb);
}
#else /* CONFIG_IXGB_NAPI */
if(adapter->vlgrp && (rx_desc->status & IXGB_RX_DESC_STATUS_VP)) {
if(adapter->vlgrp && (status & IXGB_RX_DESC_STATUS_VP)) {
vlan_hwaccel_rx(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) &
IXGB_RX_DESC_SPECIAL_VLAN_MASK);
@@ -1921,9 +1914,12 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter)
#endif /* CONFIG_IXGB_NAPI */
netdev->last_rx = jiffies;
rxdesc_done:
/* clean up descriptor, might be written over by hw */
rx_desc->status = 0;
buffer_info->skb = NULL;
/* use prefetched values */
rx_desc = next_rxd;
buffer_info = next_buffer;
}
@@ -1959,8 +1955,8 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
num_group_tail_writes = IXGB_RX_BUFFER_WRITE;
/* leave one descriptor unused */
while(--cleancount > 0) {
/* leave three descriptors unused */
while(--cleancount > 2) {
rx_desc = IXGB_RX_DESC(*rx_ring, i);
skb = dev_alloc_skb(adapter->rx_buffer_len + NET_IP_ALIGN);
@@ -1987,6 +1983,10 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter)
PCI_DMA_FROMDEVICE);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
/* guarantee DD bit not set now before h/w gets descriptor
* this is the rest of the workaround for h/w double
* writeback. */
rx_desc->status = 0;
if((i & ~(num_group_tail_writes- 1)) == i) {
/* Force memory writes to complete before letting h/w
@@ -2099,54 +2099,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
}
}
/**
* ixgb_notify_reboot - handles OS notification of reboot event.
* @param nb notifier block, unused
* @param event Event being passed to driver to act upon
* @param p A pointer to our net device
**/
static int
ixgb_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
{
struct pci_dev *pdev = NULL;
switch(event) {
case SYS_DOWN:
case SYS_HALT:
case SYS_POWER_OFF:
while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
if (pci_dev_driver(pdev) == &ixgb_driver)
ixgb_suspend(pdev, 3);
}
}
return NOTIFY_DONE;
}
/**
* ixgb_suspend - driver suspend function called from notify.
* @param pdev pci driver structure used for passing to
* @param state power state to enter
**/
static int
ixgb_suspend(struct pci_dev *pdev, uint32_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct ixgb_adapter *adapter = netdev->priv;
netif_device_detach(netdev);
if(netif_running(netdev))
ixgb_down(adapter, TRUE);
pci_save_state(pdev);
state = (state > 0) ? 3 : 0;
pci_set_power_state(pdev, state);
msec_delay(200);
return 0;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/*
* Polling 'interrupt' - used by things like netconsole to send skbs
@@ -2157,6 +2109,7 @@ ixgb_suspend(struct pci_dev *pdev, uint32_t state)
static void ixgb_netpoll(struct net_device *dev)
{
struct ixgb_adapter *adapter = dev->priv;
disable_irq(adapter->pdev->irq);
ixgb_intr(adapter->pdev->irq, dev, NULL);
enable_irq(adapter->pdev->irq);
@@ -45,8 +45,7 @@
/* Don't mdelay in interrupt context! */ \
BUG(); \
} else { \
set_current_state(TASK_UNINTERRUPTIBLE); \
schedule_timeout((x * HZ)/1000 + 2); \
msleep(x); \
} } while(0)
#endif
This diff is collapsed.
@@ -174,6 +174,7 @@ void tulip_mdio_write(struct net_device *dev, int phy_id, int location, int val)
break;
}
spin_unlock_irqrestore(&tp->mii_lock, flags);
return;
}
/* Establish sync by sending 32 logic ones. */