Commit d1138cf0 authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

@@ -1696,11 +1696,13 @@ M: mtk-manpages@gmx.net
W:	ftp://ftp.kernel.org/pub/linux/docs/manpages
S:	Maintained

-MARVELL MV64340 ETHERNET DRIVER
+MARVELL MV643XX ETHERNET DRIVER
+P:	Dale Farnsworth
+M:	dale@farnsworth.org
P:	Manish Lachwani
-L:	linux-mips@linux-mips.org
+M:	mlachwani@mvista.com
L:	netdev@vger.kernel.org
-S:	Supported
+S:	Odd Fixes for 2.4; Maintained for 2.6.

MATROX FRAMEBUFFER DRIVER
P:	Petr Vandrovec
......
@@ -2136,7 +2136,7 @@ static int __init b44_init(void)
	/* Setup parameters for syncing RX/TX DMA descriptors */
	dma_desc_align_mask = ~(dma_desc_align_size - 1);
-	dma_desc_sync_size = max(dma_desc_align_size, sizeof(struct dma_desc));
+	dma_desc_sync_size = max_t(unsigned int, dma_desc_align_size, sizeof(struct dma_desc));

	return pci_module_init(&b44_driver);
}
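The max() → max_t() switch here is not cosmetic: the kernel's max() macro type-checks its operands and warns when they differ, and dma_desc_align_size (an unsigned int) is being compared against the size_t result of sizeof(). max_t(type, a, b) casts both operands to the named type first. A minimal user-space sketch of the same idea, assuming nothing beyond standard C (the kernel's real macros live in include/linux/kernel.h):

#include <stdio.h>

/* Simplified take on the kernel's max_t(): force both operands to one
 * type before comparing, so mixed-type arguments are well defined.
 * (Unlike the kernel version, this sketch evaluates its arguments twice.) */
#define max_t(type, a, b) ((type)(a) > (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	unsigned int align = 32;	/* stands in for dma_desc_align_size */
	size_t desc = 16;		/* stands in for sizeof(struct dma_desc) */

	/* Both sides become unsigned int, mirroring the b44 change above. */
	printf("%u\n", max_t(unsigned int, align, desc));
	return 0;
}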
......
/*******************************************************************************

  Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved.

  This program is free software; you can redistribute it and/or modify it
  under the terms of the GNU General Public License as published by the Free
  Software Foundation; either version 2 of the License, or (at your option)
  any later version.

  This program is distributed in the hope that it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc., 59
  Temple Place - Suite 330, Boston, MA  02111-1307, USA.

  The full GNU General Public License is included in this distribution in the
  file called LICENSE.

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
@@ -160,7 +160,7 @@
#define DRV_NAME		"e100"
#define DRV_EXT			"-NAPI"
-#define DRV_VERSION		"3.4.14-k4"DRV_EXT
+#define DRV_VERSION		"3.5.10-k2"DRV_EXT
#define DRV_DESCRIPTION		"Intel(R) PRO/100 Network Driver"
#define DRV_COPYRIGHT		"Copyright(c) 1999-2005 Intel Corporation"
#define PFX			DRV_NAME ": "
@@ -320,7 +320,7 @@ enum cuc_dump {
	cuc_dump_complete       = 0x0000A005,
	cuc_dump_reset_complete = 0x0000A007,
};

enum port {
	software_reset  = 0x0000,
	selftest        = 0x0001,
@@ -715,10 +715,10 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
		ctrl = (cmd_addr_data & (1 << i)) ? eecs | eedi : eecs;
		writeb(ctrl, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		writeb(ctrl | eesk, &nic->csr->eeprom_ctrl_lo);
		e100_write_flush(nic); udelay(4);

		/* Eeprom drives a dummy zero to EEDO after receiving
		 * complete address.  Use this to adjust addr_len. */
		ctrl = readb(&nic->csr->eeprom_ctrl_lo);
@@ -726,7 +726,7 @@ static u16 e100_eeprom_read(struct nic *nic, u16 *addr_len, u16 addr)
			*addr_len -= (i - 16);
			i = 17;
		}

		data = (data << 1) | (ctrl & eedo ? 1 : 0);
	}
@@ -1170,7 +1170,7 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
	0x00000000, 0x00000000, 0x00000000, 0x00000000, \
}

-static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_setup_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
/* *INDENT-OFF* */
	static struct {
@@ -1213,13 +1213,13 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
*  driver can change the algorithm.
*
*  INTDELAY - This loads the dead-man timer with its initial value.
*     When this timer expires the interrupt is asserted, and the
*     timer is reset each time a new packet is received.  (see
*     BUNDLEMAX below to set the limit on number of chained packets)
*     The current default is 0x600 or 1536.  Experiments show that
*     the value should probably stay within the 0x200 - 0x1000 range.
*
*  BUNDLEMAX -
*     This sets the maximum number of frames that will be bundled.  In
*     some situations, such as the TCP windowing algorithm, it may be
*     better to limit the growth of the bundle size than let it go as
@@ -1229,7 +1229,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
*     an interrupt for every frame received.  If you do not want to put
*     a limit on the bundle size, set this value to xFFFF.
*
*  BUNDLESMALL -
*     This contains a bit-mask describing the minimum size frame that
*     will be bundled.  The default masks the lower 7 bits, which means
*     that any frame less than 128 bytes in length will not be bundled,
@@ -1244,7 +1244,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
*
*     The current default is 0xFF80, which masks out the lower 7 bits.
*     This means that any frame which is x7F (127) bytes or smaller
*     will cause an immediate interrupt.  Because this value must be a
*     bit mask, there are only a few valid values that can be used.  To
*     turn this feature off, the driver can write the value xFFFF to the
*     lower word of this instruction (in the same way that the other
@@ -1253,7 +1253,7 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
*  standard Ethernet frames are <= 2047 bytes in length.
*************************************************************************/

/* if you wish to disable the ucode functionality, while maintaining the
 * workarounds it provides, set the following defines to:
 * BUNDLESMALL 0
 * BUNDLEMAX 1
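To make the knobs above concrete, here is a compact sketch of how such tunables are typically folded into the microcode image before it is copied into the control block; the dword offsets and the BUNDLESMALL encoding are illustrative assumptions pieced together from the comment text, not the driver's literal code:

/* Illustrative sketch only: the offsets (timer/bundle/min_size) and the
 * BUNDLESMALL polarity are assumptions based on the comment above. */
#define INTDELAY	0x0600	/* dead-man timer start value (~1536) */
#define BUNDLEMAX	0x0006	/* max frames chained before an interrupt */
#define BUNDLESMALL	1	/* 1: use the 0xFF80 mask, 0: 0xFFFF (off) */

static void patch_ucode_tunables(u32 *ucode, int timer_dword,
				 int bundle_dword, int min_size_dword)
{
	/* Each parameter occupies the low 16 bits of one microcode word. */
	ucode[timer_dword]    = (ucode[timer_dword]    & 0xFFFF0000) | INTDELAY;
	ucode[bundle_dword]   = (ucode[bundle_dword]   & 0xFFFF0000) | BUNDLEMAX;
	ucode[min_size_dword] = (ucode[min_size_dword] & 0xFFFF0000)
				| (BUNDLESMALL ? 0xFF80 : 0xFFFF);
}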
@@ -1284,12 +1284,46 @@ static void e100_load_ucode(struct nic *nic, struct cb *cb, struct sk_buff *skb)
		for (i = 0; i < UCODE_SIZE; i++)
			cb->u.ucode[i] = cpu_to_le32(ucode[i]);
-		cb->command = cpu_to_le16(cb_ucode);
+		cb->command = cpu_to_le16(cb_ucode | cb_el);
		return;
	}

noloaducode:
-	cb->command = cpu_to_le16(cb_nop);
+	cb->command = cpu_to_le16(cb_nop | cb_el);
+}
+
+static inline int e100_exec_cb_wait(struct nic *nic, struct sk_buff *skb,
+	void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+{
+	int err = 0, counter = 50;
+	struct cb *cb = nic->cb_to_clean;
+
+	if ((err = e100_exec_cb(nic, NULL, e100_setup_ucode)))
+		DPRINTK(PROBE,ERR, "ucode cmd failed with error %d\n", err);
+
+	/* must restart cuc */
+	nic->cuc_cmd = cuc_start;
+
+	/* wait for completion */
+	e100_write_flush(nic);
+	udelay(10);
+
+	/* wait for possibly (ouch) 500ms */
+	while (!(cb->status & cpu_to_le16(cb_complete))) {
+		msleep(10);
+		if (!--counter) break;
+	}
+
+	/* ack any interrupts, something could have been set */
+	writeb(~0, &nic->csr->scb.stat_ack);
+
+	/* if the command failed, or is not OK, notify and return */
+	if (!counter || !(cb->status & cpu_to_le16(cb_ok))) {
+		DPRINTK(PROBE,ERR, "ucode load failed\n");
+		err = -EPERM;
+	}
+
+	return err;
}
static void e100_setup_iaaddr(struct nic *nic, struct cb *cb, static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
...@@ -1357,13 +1391,13 @@ static int e100_phy_init(struct nic *nic) ...@@ -1357,13 +1391,13 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong); mdio_write(netdev, nic->mii.phy_id, MII_NSC_CONG, cong);
} }
if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && if((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) { (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000))) {
/* enable/disable MDI/MDI-X auto-switching. /* enable/disable MDI/MDI-X auto-switching.
MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */ MDI/MDI-X auto-switching is disabled for 82551ER/QM chips */
if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) || if((nic->mac == mac_82551_E) || (nic->mac == mac_82551_F) ||
(nic->mac == mac_82551_10) || (nic->mii.force_media) || (nic->mac == mac_82551_10) || (nic->mii.force_media) ||
!(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled)) !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0); mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, 0);
else else
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH); mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, NCONFIG_AUTO_SWITCH);
@@ -1388,7 +1422,7 @@ static int e100_hw_init(struct nic *nic)
		return err;
	if((err = e100_exec_cmd(nic, ruc_load_base, 0)))
		return err;
-	if((err = e100_exec_cb(nic, NULL, e100_load_ucode)))
+	if ((err = e100_exec_cb_wait(nic, NULL, e100_setup_ucode)))
		return err;
	if((err = e100_exec_cb(nic, NULL, e100_configure)))
		return err;
@@ -1493,7 +1527,7 @@ static void e100_update_stats(struct nic *nic)
		}
	}

	if(e100_exec_cmd(nic, cuc_dump_reset, 0))
		DPRINTK(TX_ERR, DEBUG, "exec cuc_dump_reset failed\n");
}
@@ -1542,10 +1576,10 @@ static void e100_watchdog(unsigned long data)
	mii_check_link(&nic->mii);

	/* Software generated interrupt to recover from (rare) Rx
	 * allocation failure.
	 * Unfortunately have to use a spinlock to not re-enable interrupts
	 * accidentally, due to hardware that shares a register between the
	 * interrupt mask bit and the SW Interrupt generation bit */
	spin_lock_irq(&nic->cmd_lock);
	writeb(readb(&nic->csr->scb.cmd_hi) | irq_sw_gen,&nic->csr->scb.cmd_hi);
	spin_unlock_irq(&nic->cmd_lock);
@@ -1830,7 +1864,7 @@ static void e100_rx_clean(struct nic *nic, unsigned int *work_done,
	struct rx *rx_to_start = NULL;

	/* are we already rnr? then pay attention!!! this ensures that
	 * the state machine progression never allows a start with a
	 * partially cleaned list, avoiding a race between hardware
	 * and rx_to_clean when in NAPI mode */
	if(RU_SUSPENDED == nic->ru_running)
@@ -2066,7 +2100,7 @@ static void e100_tx_timeout(struct net_device *netdev)
{
	struct nic *nic = netdev_priv(netdev);

	/* Reset outside of interrupt context, to avoid request_irq
	 * in interrupt context */
	schedule_work(&nic->tx_timeout_task);
}
@@ -2313,7 +2347,7 @@ static int e100_set_ringparam(struct net_device *netdev,
	struct param_range *rfds = &nic->params.rfds;
	struct param_range *cbs = &nic->params.cbs;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;
	if(netif_running(netdev))
@@ -2631,7 +2665,9 @@ static int __devinit e100_probe(struct pci_dev *pdev,
		nic->flags |= wol_magic;

	/* ack any pending wake events, disable PME */
-	pci_enable_wake(pdev, 0, 0);
+	err = pci_enable_wake(pdev, 0, 0);
+	if (err)
+		DPRINTK(PROBE, ERR, "Error clearing wake event\n");

	strcpy(netdev->name, "eth%d");
	if((err = register_netdev(netdev))) {
@@ -2682,6 +2718,7 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);
+	int retval;

	if(netif_running(netdev))
		e100_down(nic);
@@ -2689,9 +2726,14 @@ static int e100_suspend(struct pci_dev *pdev, pm_message_t state)
	netif_device_detach(netdev);

	pci_save_state(pdev);
-	pci_enable_wake(pdev, pci_choose_state(pdev, state), nic->flags & (wol_magic | e100_asf(nic)));
+	retval = pci_enable_wake(pdev, pci_choose_state(pdev, state),
+	                         nic->flags & (wol_magic | e100_asf(nic)));
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error enabling wake\n");
	pci_disable_device(pdev);
-	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error %d setting power state\n", retval);

	return 0;
}
@@ -2700,11 +2742,16 @@ static int e100_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);
+	int retval;

-	pci_set_power_state(pdev, PCI_D0);
+	retval = pci_set_power_state(pdev, PCI_D0);
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error waking adapter\n");
	pci_restore_state(pdev);
	/* ack any pending wake events, disable PME */
-	pci_enable_wake(pdev, 0, 0);
+	retval = pci_enable_wake(pdev, 0, 0);
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error clearing wake events\n");
	if(e100_hw_init(nic))
		DPRINTK(HW, ERR, "e100_hw_init failed\n");
@@ -2721,12 +2768,15 @@ static void e100_shutdown(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct nic *nic = netdev_priv(netdev);
+	int retval;

#ifdef CONFIG_PM
-	pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
+	retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic | e100_asf(nic)));
#else
-	pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
+	retval = pci_enable_wake(pdev, 0, nic->flags & (wol_magic));
#endif
+	if (retval)
+		DPRINTK(PROBE,ERR, "Error enabling wake\n");
}
@@ -2739,7 +2789,7 @@ static struct pci_driver e100_driver = {
	.suspend =      e100_suspend,
	.resume =       e100_resume,
#endif
	.shutdown =     e100_shutdown,
};

static int __init e100_init_module(void)
......
@@ -72,10 +72,6 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
-#ifdef CONFIG_E1000_MQ
-#include <linux/cpu.h>
-#include <linux/smp.h>
-#endif

#define BAR_0		0
#define BAR_1		1
@@ -87,6 +83,10 @@
struct e1000_adapter;

#include "e1000_hw.h"
+
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif

#ifdef DBG
#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
@@ -169,6 +169,13 @@ struct e1000_buffer {
	uint16_t next_to_watch;
};

+#ifdef CONFIG_E1000_MQ
+struct e1000_queue_stats {
+	uint64_t packets;
+	uint64_t bytes;
+};
+#endif
+
struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
@@ -191,10 +198,12 @@ struct e1000_tx_ring {
	spinlock_t tx_lock;
	uint16_t tdh;
	uint16_t tdt;
-	uint64_t pkt;

	boolean_t last_tx_tso;
+
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats tx_stats;
+#endif
};

struct e1000_rx_ring {
@@ -216,9 +225,17 @@ struct e1000_rx_ring {
	struct e1000_ps_page *ps_page;
	struct e1000_ps_page_dma *ps_page_dma;

+	struct sk_buff *rx_skb_top;
+	struct sk_buff *rx_skb_prev;
+
+	/* cpu for rx queue */
+	int cpu;
+
	uint16_t rdh;
	uint16_t rdt;
-	uint64_t pkt;
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats rx_stats;
+#endif
};
#define E1000_DESC_UNUSED(R) \
@@ -251,6 +268,9 @@ struct e1000_adapter {
	uint16_t link_speed;
	uint16_t link_duplex;
	spinlock_t stats_lock;
+#ifdef CONFIG_E1000_NAPI
+	spinlock_t tx_queue_lock;
+#endif
	atomic_t irq_sem;
	struct work_struct tx_timeout_task;
	struct work_struct watchdog_task;
@@ -264,6 +284,7 @@ struct e1000_adapter {
#ifdef CONFIG_E1000_MQ
	struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
#endif
+	unsigned long tx_queue_len;
	uint32_t txd_cmd;
	uint32_t tx_int_delay;
	uint32_t tx_abs_int_delay;
@@ -271,9 +292,11 @@ struct e1000_adapter {
	uint64_t gotcl_old;
	uint64_t tpt_old;
	uint64_t colc_old;
+	uint32_t tx_timeout_count;
	uint32_t tx_fifo_head;
	uint32_t tx_head_addr;
	uint32_t tx_fifo_size;
+	uint8_t  tx_timeout_factor;
	atomic_t tx_fifo_stall;
	boolean_t pcix_82544;
	boolean_t detect_tx_hung;
@@ -281,14 +304,15 @@ struct e1000_adapter {
	/* RX */
#ifdef CONFIG_E1000_NAPI
	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring,
			       int *work_done, int work_to_do);
#else
	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
			       struct e1000_rx_ring *rx_ring);
#endif
	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
-			      struct e1000_rx_ring *rx_ring);
+			      struct e1000_rx_ring *rx_ring,
+			      int cleaned_count);
	struct e1000_rx_ring *rx_ring;      /* One per active queue */
#ifdef CONFIG_E1000_NAPI
	struct net_device *polling_netdev;  /* One per active queue */
@@ -296,13 +320,15 @@ struct e1000_adapter {
#ifdef CONFIG_E1000_MQ
	struct net_device **cpu_netdev;     /* per-cpu */
	struct call_async_data_struct rx_sched_call_data;
-	int cpu_for_queue[4];
+	cpumask_t cpumask;
#endif
-	int num_queues;
+	int num_tx_queues;
+	int num_rx_queues;

	uint64_t hw_csum_err;
	uint64_t hw_csum_good;
	uint64_t rx_hdr_split;
+	uint32_t alloc_rx_buff_failed;
	uint32_t rx_int_delay;
	uint32_t rx_abs_int_delay;
	boolean_t rx_csum;
@@ -330,6 +356,7 @@ struct e1000_adapter {
	struct e1000_rx_ring test_rx_ring;

+	u32 *config_space;
	int msg_enable;
#ifdef CONFIG_PCI_MSI
	boolean_t have_msi;
......
@@ -80,6 +80,7 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
@@ -93,9 +94,20 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
};
-#define E1000_STATS_LEN	\
+
+#ifdef CONFIG_E1000_MQ
+#define E1000_QUEUE_STATS_LEN \
+	(((struct e1000_adapter *)netdev->priv)->num_tx_queues + \
+	 ((struct e1000_adapter *)netdev->priv)->num_rx_queues) \
+	* (sizeof(struct e1000_queue_stats) / sizeof(uint64_t))
+#else
+#define E1000_QUEUE_STATS_LEN 0
+#endif
+#define E1000_GLOBAL_STATS_LEN	\
	sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats)
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
@@ -183,7 +195,15 @@ e1000_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;

-	if(ecmd->autoneg == AUTONEG_ENABLE) {
+	/* When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed */
+	if (e1000_check_phy_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot change link characteristics "
+		        "when SoL/IDER is active.\n");
+		return -EINVAL;
+	}
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
		hw->autoneg = 1;
		if(hw->media_type == e1000_media_type_fiber)
			hw->autoneg_advertised = ADVERTISED_1000baseT_Full |
@@ -567,21 +587,21 @@ e1000_get_drvinfo(struct net_device *netdev,
	strncpy(drvinfo->driver,  e1000_driver_name, 32);
	strncpy(drvinfo->version, e1000_driver_version, 32);

-	/* EEPROM image version # is reported as firware version # for
+	/* EEPROM image version # is reported as firmware version # for
	 * 8257{1|2|3} controllers */
	e1000_read_eeprom(&adapter->hw, 5, 1, &eeprom_data);
	switch (adapter->hw.mac_type) {
	case e1000_82571:
	case e1000_82572:
	case e1000_82573:
		sprintf(firmware_version, "%d.%d-%d",
			(eeprom_data & 0xF000) >> 12,
			(eeprom_data & 0x0FF0) >> 4,
			eeprom_data & 0x000F);
		break;
	default:
-		sprintf(firmware_version, "n/a");
+		sprintf(firmware_version, "N/A");
	}

	strncpy(drvinfo->fw_version, firmware_version, 32);
@@ -623,8 +643,8 @@ e1000_set_ringparam(struct net_device *netdev,
	struct e1000_rx_ring *rxdr, *rx_old, *rx_new;
	int i, err, tx_ring_size, rx_ring_size;

-	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_queues;
-	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_queues;
+	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;

	if (netif_running(adapter->netdev))
		e1000_down(adapter);
@@ -663,10 +683,10 @@ e1000_set_ringparam(struct net_device *netdev,
		E1000_MAX_TXD : E1000_MAX_82544_TXD));
	E1000_ROUNDUP(txdr->count, REQ_TX_DESCRIPTOR_MULTIPLE);

-	for (i = 0; i < adapter->num_queues; i++) {
-		txdr[i].count = txdr->count;
-		rxdr[i].count = rxdr->count;
-	}
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		txdr[i].count = txdr->count;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rxdr[i].count = rxdr->count;

	if(netif_running(adapter->netdev)) {
		/* Try to get new resources before deleting old */
} }
} }
if(txdr->desc) { if (txdr->desc) {
pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma); pci_free_consistent(pdev, txdr->size, txdr->desc, txdr->dma);
txdr->desc = NULL; txdr->desc = NULL;
} }
if(rxdr->desc) { if (rxdr->desc) {
pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma); pci_free_consistent(pdev, rxdr->size, rxdr->desc, rxdr->dma);
rxdr->desc = NULL; rxdr->desc = NULL;
} }
kfree(txdr->buffer_info); kfree(txdr->buffer_info);
txdr->buffer_info = NULL; txdr->buffer_info = NULL;
kfree(rxdr->buffer_info); kfree(rxdr->buffer_info);
rxdr->buffer_info = NULL; rxdr->buffer_info = NULL;
...@@ -1327,11 +1346,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter) ...@@ -1327,11 +1346,11 @@ e1000_set_phy_loopback(struct e1000_adapter *adapter)
static int static int
e1000_setup_loopback_test(struct e1000_adapter *adapter) e1000_setup_loopback_test(struct e1000_adapter *adapter)
{ {
uint32_t rctl;
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
uint32_t rctl;
if (hw->media_type == e1000_media_type_fiber || if (hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes) { hw->media_type == e1000_media_type_internal_serdes) {
switch (hw->mac_type) { switch (hw->mac_type) {
case e1000_82545: case e1000_82545:
case e1000_82546: case e1000_82546:
...@@ -1362,25 +1381,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter) ...@@ -1362,25 +1381,25 @@ e1000_setup_loopback_test(struct e1000_adapter *adapter)
static void static void
e1000_loopback_cleanup(struct e1000_adapter *adapter) e1000_loopback_cleanup(struct e1000_adapter *adapter)
{ {
struct e1000_hw *hw = &adapter->hw;
uint32_t rctl; uint32_t rctl;
uint16_t phy_reg; uint16_t phy_reg;
struct e1000_hw *hw = &adapter->hw;
rctl = E1000_READ_REG(&adapter->hw, RCTL); rctl = E1000_READ_REG(hw, RCTL);
rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC); rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
E1000_WRITE_REG(&adapter->hw, RCTL, rctl); E1000_WRITE_REG(hw, RCTL, rctl);
switch (hw->mac_type) { switch (hw->mac_type) {
case e1000_82571: case e1000_82571:
case e1000_82572: case e1000_82572:
if (hw->media_type == e1000_media_type_fiber || if (hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes){ hw->media_type == e1000_media_type_internal_serdes) {
#define E1000_SERDES_LB_OFF 0x400 #define E1000_SERDES_LB_OFF 0x400
E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF); E1000_WRITE_REG(hw, SCTL, E1000_SERDES_LB_OFF);
msec_delay(10); msec_delay(10);
break; break;
} }
/* fall thru for Cu adapters */ /* Fall Through */
case e1000_82545: case e1000_82545:
case e1000_82546: case e1000_82546:
case e1000_82545_rev_3: case e1000_82545_rev_3:
...@@ -1401,7 +1420,7 @@ static void ...@@ -1401,7 +1420,7 @@ static void
e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{ {
memset(skb->data, 0xFF, frame_size); memset(skb->data, 0xFF, frame_size);
frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size; frame_size &= ~1;
memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
...@@ -1410,7 +1429,7 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) ...@@ -1410,7 +1429,7 @@ e1000_create_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
static int static int
e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
{ {
frame_size = (frame_size % 2) ? (frame_size - 1) : frame_size; frame_size &= ~1;
if(*(skb->data + 3) == 0xFF) { if(*(skb->data + 3) == 0xFF) {
if((*(skb->data + frame_size / 2 + 10) == 0xBE) && if((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
(*(skb->data + frame_size / 2 + 12) == 0xAF)) { (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
@@ -1488,14 +1507,25 @@ e1000_run_loopback_test(struct e1000_adapter *adapter)
static int
e1000_loopback_test(struct e1000_adapter *adapter, uint64_t *data)
{
-	if((*data = e1000_setup_desc_rings(adapter))) goto err_loopback;
-	if((*data = e1000_setup_loopback_test(adapter)))
-		goto err_loopback_setup;
+	/* PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active */
+	if (e1000_check_phy_reset_block(&adapter->hw)) {
+		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+		        "when SoL/IDER is active.\n");
+		*data = 0;
+		goto out;
+	}
+
+	if ((*data = e1000_setup_desc_rings(adapter)))
+		goto out;
+	if ((*data = e1000_setup_loopback_test(adapter)))
+		goto err_loopback;
	*data = e1000_run_loopback_test(adapter);
	e1000_loopback_cleanup(adapter);
-err_loopback_setup:
-	e1000_free_desc_rings(adapter);
+
err_loopback:
+	e1000_free_desc_rings(adapter);
+out:
	return *data;
}
@@ -1617,6 +1647,7 @@ e1000_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
		/* Wake events only supported on port A for dual fiber */
		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1) {
			wol->supported = 0;
@@ -1660,6 +1691,7 @@ e1000_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
	case E1000_DEV_ID_82546EB_FIBER:
	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
		/* Wake events only supported on port A for dual fiber */
		if(E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
			return wol->wolopts ? -EOPNOTSUPP : 0;
@@ -1721,21 +1753,21 @@ e1000_phys_id(struct net_device *netdev, uint32_t data)
		mod_timer(&adapter->blink_timer, jiffies);
		msleep_interruptible(data * 1000);
		del_timer_sync(&adapter->blink_timer);
-	}
-	else if(adapter->hw.mac_type < e1000_82573) {
-		E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
+	} else if (adapter->hw.mac_type < e1000_82573) {
+		E1000_WRITE_REG(&adapter->hw, LEDCTL,
+			(E1000_LEDCTL_LED2_BLINK_RATE |
			E1000_LEDCTL_LED0_BLINK | E1000_LEDCTL_LED2_BLINK |
			(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
			(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED0_MODE_SHIFT) |
			(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED1_MODE_SHIFT)));
		msleep_interruptible(data * 1000);
-	}
-	else {
-		E1000_WRITE_REG(&adapter->hw, LEDCTL, (E1000_LEDCTL_LED2_BLINK_RATE |
+	} else {
+		E1000_WRITE_REG(&adapter->hw, LEDCTL,
+			(E1000_LEDCTL_LED2_BLINK_RATE |
			E1000_LEDCTL_LED1_BLINK | E1000_LEDCTL_LED2_BLINK |
			(E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED2_MODE_SHIFT) |
			(E1000_LEDCTL_MODE_LINK_ACTIVITY << E1000_LEDCTL_LED1_MODE_SHIFT) |
			(E1000_LEDCTL_MODE_LED_OFF << E1000_LEDCTL_LED0_MODE_SHIFT)));
		msleep_interruptible(data * 1000);
	}
@@ -1768,19 +1800,43 @@ e1000_get_ethtool_stats(struct net_device *netdev,
		struct ethtool_stats *stats, uint64_t *data)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_E1000_MQ
+	uint64_t *queue_stat;
+	int stat_count = sizeof(struct e1000_queue_stats) / sizeof(uint64_t);
+	int j, k;
+#endif
	int i;

	e1000_update_stats(adapter);
-	for(i = 0; i < E1000_STATS_LEN; i++) {
+	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
			sizeof(uint64_t)) ? *(uint64_t *)p : *(uint32_t *)p;
	}
+#ifdef CONFIG_E1000_MQ
+	for (j = 0; j < adapter->num_tx_queues; j++) {
+		queue_stat = (uint64_t *)&adapter->tx_ring[j].tx_stats;
+		for (k = 0; k < stat_count; k++)
+			data[i + k] = queue_stat[k];
+		i += k;
+	}
+	for (j = 0; j < adapter->num_rx_queues; j++) {
+		queue_stat = (uint64_t *)&adapter->rx_ring[j].rx_stats;
+		for (k = 0; k < stat_count; k++)
+			data[i + k] = queue_stat[k];
+		i += k;
+	}
+#endif
+/*	BUG_ON(i != E1000_STATS_LEN); */
}

static void
e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
{
+#ifdef CONFIG_E1000_MQ
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+#endif
+	uint8_t *p = data;
	int i;

	switch(stringset) {
@@ -1789,11 +1845,26 @@ e1000_get_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data)
			E1000_TEST_LEN*ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
-		for (i=0; i < E1000_STATS_LEN; i++) {
-			memcpy(data + i * ETH_GSTRING_LEN,
-			       e1000_gstrings_stats[i].stat_string,
-			       ETH_GSTRING_LEN);
+		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, e1000_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+#ifdef CONFIG_E1000_MQ
+		for (i = 0; i < adapter->num_tx_queues; i++) {
+			sprintf(p, "tx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "tx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
		}
+		for (i = 0; i < adapter->num_rx_queues; i++) {
+			sprintf(p, "rx_queue_%u_packets", i);
+			p += ETH_GSTRING_LEN;
+			sprintf(p, "rx_queue_%u_bytes", i);
+			p += ETH_GSTRING_LEN;
+		}
+#endif
+/*		BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
......
@@ -318,6 +318,8 @@ e1000_set_mac_type(struct e1000_hw *hw)
	case E1000_DEV_ID_82546GB_FIBER:
	case E1000_DEV_ID_82546GB_SERDES:
	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
		hw->mac_type = e1000_82546_rev_3;
		break;
	case E1000_DEV_ID_82541EI:
@@ -639,6 +641,7 @@ e1000_init_hw(struct e1000_hw *hw)
	uint16_t cmd_mmrbc;
	uint16_t stat_mmrbc;
	uint32_t mta_size;
+	uint32_t ctrl_ext;

	DEBUGFUNC("e1000_init_hw");
@@ -735,7 +738,6 @@ e1000_init_hw(struct e1000_hw *hw)
		break;
	case e1000_82571:
	case e1000_82572:
-		ctrl |= (1 << 22);
	case e1000_82573:
		ctrl |= E1000_TXDCTL_COUNT_DESC;
		break;
@@ -775,6 +777,15 @@ e1000_init_hw(struct e1000_hw *hw)
	 */
	e1000_clear_hw_cntrs(hw);

+	if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+	    hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+		/* Relaxed ordering must be disabled to avoid a parity
+		 * error crash in a PCI slot. */
+		ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+	}
+
	return ret_val;
}
@@ -838,6 +849,11 @@ e1000_setup_link(struct e1000_hw *hw)
	DEBUGFUNC("e1000_setup_link");

+	/* In the case of the phy reset being blocked, we already have a link.
+	 * We do not have to set it up again. */
+	if (e1000_check_phy_reset_block(hw))
+		return E1000_SUCCESS;
+
	/* Read and store word 0x0F of the EEPROM. This word contains bits
	 * that determine the hardware's default PAUSE (flow control) mode,
	 * a bit that determines whether the HW defaults to enabling or
@@ -1929,14 +1945,19 @@ e1000_phy_force_speed_duplex(struct e1000_hw *hw)
void
e1000_config_collision_dist(struct e1000_hw *hw)
{
-	uint32_t tctl;
+	uint32_t tctl, coll_dist;

	DEBUGFUNC("e1000_config_collision_dist");

+	if (hw->mac_type < e1000_82543)
+		coll_dist = E1000_COLLISION_DISTANCE_82542;
+	else
+		coll_dist = E1000_COLLISION_DISTANCE;
+
	tctl = E1000_READ_REG(hw, TCTL);

	tctl &= ~E1000_TCTL_COLD;
-	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+	tctl |= coll_dist << E1000_COLD_SHIFT;

	E1000_WRITE_REG(hw, TCTL, tctl);
	E1000_WRITE_FLUSH(hw);
@@ -2982,6 +3003,8 @@ e1000_phy_hw_reset(struct e1000_hw *hw)
		if (hw->mac_type < e1000_82571)
			msec_delay(10);
+		else
+			udelay(100);

		E1000_WRITE_REG(hw, CTRL, ctrl);
		E1000_WRITE_FLUSH(hw);
@@ -3881,14 +3904,16 @@ e1000_read_eeprom(struct e1000_hw *hw,
		return -E1000_ERR_EEPROM;
	}

-	/* FLASH reads without acquiring the semaphore are safe in 82573-based
-	 * controllers.
-	 */
-	if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
-	    (hw->mac_type != e1000_82573)) {
-		/* Prepare the EEPROM for reading  */
-		if(e1000_acquire_eeprom(hw) != E1000_SUCCESS)
-			return -E1000_ERR_EEPROM;
+	/* FLASH reads without acquiring the semaphore are safe */
+	if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
+	    hw->eeprom.use_eerd == FALSE) {
+		switch (hw->mac_type) {
+		default:
+			/* Prepare the EEPROM for reading  */
+			if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+				return -E1000_ERR_EEPROM;
+			break;
+		}
	}

	if(eeprom->use_eerd == TRUE) {
@@ -6720,6 +6745,12 @@ e1000_get_phy_cfg_done(struct e1000_hw *hw)
		break;
	}

+	/* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
+	 * Need to wait for PHY configuration completion before accessing NVM
+	 * and PHY. */
+	if (hw->mac_type == e1000_82573)
+		msec_delay(25);
+
	return E1000_SUCCESS;
}
......
@@ -439,6 +439,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82546GB_FIBER       0x107A
#define E1000_DEV_ID_82546GB_SERDES      0x107B
#define E1000_DEV_ID_82546GB_PCIE        0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
#define E1000_DEV_ID_82547EI             0x1019
#define E1000_DEV_ID_82571EB_COPPER      0x105E
#define E1000_DEV_ID_82571EB_FIBER       0x105F
@@ -449,6 +450,7 @@ int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
#define E1000_DEV_ID_82573E              0x108B
#define E1000_DEV_ID_82573E_IAMT         0x108C
#define E1000_DEV_ID_82573L              0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5

#define NODE_ADDRESS_SIZE 6
@@ -1497,6 +1499,7 @@ struct e1000_hw {
#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
@@ -1954,6 +1957,23 @@ struct e1000_host_command_info {
#define E1000_MDALIGN          4096

+/* PCI-Ex registers */
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP    | \
+                             E1000_GCR_RXDSCW_NO_SNOOP | \
+                             E1000_GCR_RXDSCR_NO_SNOOP | \
+                             E1000_GCR_TXD_NO_SNOOP    | \
+                             E1000_GCR_TXDSCW_NO_SNOOP | \
+                             E1000_GCR_TXDSCR_NO_SNOOP)
+
#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
/* Function Active and Power State to MNG */
#define E1000_FACTPS_FUNC0_POWER_STATE_MASK   0x00000003
@@ -2077,7 +2097,10 @@ struct e1000_host_command_info {
/* Collision related configuration parameters */
#define E1000_COLLISION_THRESHOLD       15
#define E1000_CT_SHIFT                  4
-#define E1000_COLLISION_DISTANCE        64
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLLISION_DISTANCE_82542  64
#define E1000_FDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
#define E1000_HDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
#define E1000_COLD_SHIFT                12
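A quick worked example of how these values land in the register (my arithmetic, not patch code): e1000_config_collision_dist() earlier in this commit clears TCTL's COLD field and shifts the distance into place, so on 82543-and-later parts the 0-based value 63 becomes 63 << 12 = 0x3F000 in TCTL, while the 82542 keeps writing the literal 64:

/* Sketch of the TCTL.COLD packing using the constants defined above. */
uint32_t tctl = 0;                       /* pretend value read from TCTL */
tctl &= ~E1000_TCTL_COLD;                /* clear the COLD field */
tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;   /* 0x3F000 */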
......
@@ -43,7 +43,7 @@ static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
#else
#define DRIVERNAPI "-NAPI"
#endif
-#define DRV_VERSION "6.1.16-k2"DRIVERNAPI
+#define DRV_VERSION "6.3.9-k2"DRIVERNAPI
char e1000_driver_version[] = DRV_VERSION;
static char e1000_copyright[] = "Copyright (c) 1999-2005 Intel Corporation.";

@@ -97,7 +97,9 @@ static struct pci_device_id e1000_pci_tbl[] = {
	INTEL_E1000_ETHERNET_DEVICE(0x108A),
	INTEL_E1000_ETHERNET_DEVICE(0x108B),
	INTEL_E1000_ETHERNET_DEVICE(0x108C),
+	INTEL_E1000_ETHERNET_DEVICE(0x1099),
	INTEL_E1000_ETHERNET_DEVICE(0x109A),
+	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
	/* required last entry */
	{0,}
};
@@ -171,9 +173,11 @@ static boolean_t e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
				       struct e1000_rx_ring *rx_ring);
#endif
static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-				   struct e1000_rx_ring *rx_ring);
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count);
static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-				      struct e1000_rx_ring *rx_ring);
+				      struct e1000_rx_ring *rx_ring,
+				      int cleaned_count);
static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
			   int cmd);
@@ -319,7 +323,75 @@ e1000_update_mng_vlan(struct e1000_adapter *adapter)
		}
	}
}
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+static inline void
+e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+
+	/* Let firmware take over control of h/w */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+static inline void
+e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+
+	/* Let firmware know the driver has taken over */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
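These two helpers bracket the driver's ownership of the NIC; the actual call sites fall outside this diff's context windows. A reconstructed sketch of their typical use, for orientation only (the bodies here are assumed shapes, not quoted from the patch):

/* Reconstructed call sites; assumed shape, not quoted from this diff. */
static int e1000_open(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* ... allocate TX/RX resources, then take ownership so AMT/ASF
	 * firmware stops treating the port as its own ... */
	e1000_get_hw_control(adapter);
	/* ... e1000_up(adapter); ... */
	return 0;
}

static int e1000_close(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	/* ... e1000_down(adapter); free resources ... */
	e1000_release_hw_control(adapter);
	return 0;
}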
int int
e1000_up(struct e1000_adapter *adapter) e1000_up(struct e1000_adapter *adapter)
{ {
...@@ -343,8 +415,14 @@ e1000_up(struct e1000_adapter *adapter) ...@@ -343,8 +415,14 @@ e1000_up(struct e1000_adapter *adapter)
e1000_configure_tx(adapter); e1000_configure_tx(adapter);
e1000_setup_rctl(adapter); e1000_setup_rctl(adapter);
e1000_configure_rx(adapter); e1000_configure_rx(adapter);
for (i = 0; i < adapter->num_queues; i++) /* call E1000_DESC_UNUSED which always leaves
adapter->alloc_rx_buf(adapter, &adapter->rx_ring[i]); * at least 1 descriptor unused to make sure
* next_to_use != next_to_clean */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct e1000_rx_ring *ring = &adapter->rx_ring[i];
adapter->alloc_rx_buf(adapter, ring,
E1000_DESC_UNUSED(ring));
}
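/* A minimal sketch of what E1000_DESC_UNUSED computes (illustrative only;
 * the driver's real macro lives in e1000.h): the number of free slots in
 * the ring, always keeping one descriptor empty so that next_to_use can
 * never catch up to next_to_clean. */
static inline unsigned int e1000_desc_unused_sketch(const struct e1000_rx_ring *ring)
{
	unsigned int base = (ring->next_to_clean > ring->next_to_use) ?
				0 : ring->count;

	return base + ring->next_to_clean - ring->next_to_use - 1;
}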
#ifdef CONFIG_PCI_MSI #ifdef CONFIG_PCI_MSI
if(adapter->hw.mac_type > e1000_82547_rev_2) { if(adapter->hw.mac_type > e1000_82547_rev_2) {
...@@ -364,6 +442,12 @@ e1000_up(struct e1000_adapter *adapter) ...@@ -364,6 +442,12 @@ e1000_up(struct e1000_adapter *adapter)
return err; return err;
} }
#ifdef CONFIG_E1000_MQ
e1000_setup_queue_mapping(adapter);
#endif
adapter->tx_queue_len = netdev->tx_queue_len;
mod_timer(&adapter->watchdog_timer, jiffies); mod_timer(&adapter->watchdog_timer, jiffies);
#ifdef CONFIG_E1000_NAPI #ifdef CONFIG_E1000_NAPI
...@@ -378,6 +462,8 @@ void ...@@ -378,6 +462,8 @@ void
e1000_down(struct e1000_adapter *adapter) e1000_down(struct e1000_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
e1000_check_mng_mode(&adapter->hw);
e1000_irq_disable(adapter); e1000_irq_disable(adapter);
#ifdef CONFIG_E1000_MQ #ifdef CONFIG_E1000_MQ
...@@ -396,6 +482,7 @@ e1000_down(struct e1000_adapter *adapter) ...@@ -396,6 +482,7 @@ e1000_down(struct e1000_adapter *adapter)
#ifdef CONFIG_E1000_NAPI #ifdef CONFIG_E1000_NAPI
netif_poll_disable(netdev); netif_poll_disable(netdev);
#endif #endif
netdev->tx_queue_len = adapter->tx_queue_len;
adapter->link_speed = 0; adapter->link_speed = 0;
adapter->link_duplex = 0; adapter->link_duplex = 0;
netif_carrier_off(netdev); netif_carrier_off(netdev);
...@@ -405,12 +492,16 @@ e1000_down(struct e1000_adapter *adapter) ...@@ -405,12 +492,16 @@ e1000_down(struct e1000_adapter *adapter)
e1000_clean_all_tx_rings(adapter); e1000_clean_all_tx_rings(adapter);
e1000_clean_all_rx_rings(adapter); e1000_clean_all_rx_rings(adapter);
/* If WoL is not enabled and management mode is not IAMT /* Power down the PHY so no link is implied when interface is down *
* Power down the PHY so no link is implied when interface is down */ * The PHY cannot be powered down if any of the following is TRUE *
if(!adapter->wol && adapter->hw.mac_type >= e1000_82540 && * (a) WoL is enabled
* (b) AMT is active
* (c) SoL/IDER session is active */
if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
adapter->hw.media_type == e1000_media_type_copper && adapter->hw.media_type == e1000_media_type_copper &&
!e1000_check_mng_mode(&adapter->hw) && !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
!(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN)) { !mng_mode_enabled &&
!e1000_check_phy_reset_block(&adapter->hw)) {
uint16_t mii_reg; uint16_t mii_reg;
e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg); e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN; mii_reg |= MII_CR_POWER_DOWN;
...@@ -422,10 +513,8 @@ e1000_down(struct e1000_adapter *adapter) ...@@ -422,10 +513,8 @@ e1000_down(struct e1000_adapter *adapter)
void void
e1000_reset(struct e1000_adapter *adapter) e1000_reset(struct e1000_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev;
uint32_t pba, manc; uint32_t pba, manc;
uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF; uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
uint16_t fc_low_water_mark = E1000_FC_LOW_DIFF;
/* Repartition Pba for greater than 9k mtu /* Repartition Pba for greater than 9k mtu
* To take effect CTRL.RST is required. * To take effect CTRL.RST is required.
...@@ -449,15 +538,8 @@ e1000_reset(struct e1000_adapter *adapter) ...@@ -449,15 +538,8 @@ e1000_reset(struct e1000_adapter *adapter)
} }
if((adapter->hw.mac_type != e1000_82573) && if((adapter->hw.mac_type != e1000_82573) &&
(adapter->rx_buffer_len > E1000_RXBUFFER_8192)) { (adapter->netdev->mtu > E1000_RXBUFFER_8192))
pba -= 8; /* allocate more FIFO for Tx */ pba -= 8; /* allocate more FIFO for Tx */
/* send an XOFF when there is enough space in the
* Rx FIFO to hold one extra full size Rx packet
*/
fc_high_water_mark = netdev->mtu + ENET_HEADER_SIZE +
ETHERNET_FCS_SIZE + 1;
fc_low_water_mark = fc_high_water_mark + 8;
}
if(adapter->hw.mac_type == e1000_82547) { if(adapter->hw.mac_type == e1000_82547) {
...@@ -471,10 +553,12 @@ e1000_reset(struct e1000_adapter *adapter) ...@@ -471,10 +553,12 @@ e1000_reset(struct e1000_adapter *adapter)
E1000_WRITE_REG(&adapter->hw, PBA, pba); E1000_WRITE_REG(&adapter->hw, PBA, pba);
/* flow control settings */ /* flow control settings */
adapter->hw.fc_high_water = (pba << E1000_PBA_BYTES_SHIFT) - /* Set the FC high water mark to 90% of the FIFO size.
fc_high_water_mark; * Required to clear last 3 LSB */
adapter->hw.fc_low_water = (pba << E1000_PBA_BYTES_SHIFT) - fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
fc_low_water_mark;
adapter->hw.fc_high_water = fc_high_water_mark;
adapter->hw.fc_low_water = fc_high_water_mark - 8;
adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME; adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
adapter->hw.fc_send_xon = 1; adapter->hw.fc_send_xon = 1;
adapter->hw.fc = adapter->hw.original_fc; adapter->hw.fc = adapter->hw.original_fc;
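/* Worked example of the watermark arithmetic above (numbers illustrative,
 * not taken from the patch): with pba = 48, i.e. 48 KB of packet buffer,
 *   (48 * 9216) / 10 = 44236
 *   44236 & 0xFFF8   = 44232   -> fc_high_water
 *   44232 - 8        = 44224   -> fc_low_water
 * roughly 90% of the FIFO, aligned so the low 3 bits stay clear. */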
...@@ -517,8 +601,6 @@ e1000_probe(struct pci_dev *pdev, ...@@ -517,8 +601,6 @@ e1000_probe(struct pci_dev *pdev,
struct net_device *netdev; struct net_device *netdev;
struct e1000_adapter *adapter; struct e1000_adapter *adapter;
unsigned long mmio_start, mmio_len; unsigned long mmio_start, mmio_len;
uint32_t ctrl_ext;
uint32_t swsm;
static int cards_found = 0; static int cards_found = 0;
int i, err, pci_using_dac; int i, err, pci_using_dac;
...@@ -712,8 +794,7 @@ e1000_probe(struct pci_dev *pdev, ...@@ -712,8 +794,7 @@ e1000_probe(struct pci_dev *pdev,
case e1000_82546: case e1000_82546:
case e1000_82546_rev_3: case e1000_82546_rev_3:
case e1000_82571: case e1000_82571:
if((E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1) if(E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
&& (adapter->hw.media_type == e1000_media_type_copper)) {
e1000_read_eeprom(&adapter->hw, e1000_read_eeprom(&adapter->hw,
EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data); EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
break; break;
...@@ -727,25 +808,36 @@ e1000_probe(struct pci_dev *pdev, ...@@ -727,25 +808,36 @@ e1000_probe(struct pci_dev *pdev,
if(eeprom_data & eeprom_apme_mask) if(eeprom_data & eeprom_apme_mask)
adapter->wol |= E1000_WUFC_MAG; adapter->wol |= E1000_WUFC_MAG;
/* print bus type/speed/width info */
{
struct e1000_hw *hw = &adapter->hw;
DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
(hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
(hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
(hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
(hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
(hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
(hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
(hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
"32-bit"));
}
for (i = 0; i < 6; i++)
printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
/* reset the hardware with the new settings */ /* reset the hardware with the new settings */
e1000_reset(adapter); e1000_reset(adapter);
/* Let firmware know the driver has taken over */ /* If the controller is 82573 and f/w is AMT, do not set
switch(adapter->hw.mac_type) { * DRV_LOAD until the interface is up. For all other cases,
case e1000_82571: * let the f/w know that the h/w is now under the control
case e1000_82572: * of the driver. */
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); if (adapter->hw.mac_type != e1000_82573 ||
E1000_WRITE_REG(&adapter->hw, CTRL_EXT, !e1000_check_mng_mode(&adapter->hw))
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); e1000_get_hw_control(adapter);
break;
case e1000_82573:
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
swsm | E1000_SWSM_DRV_LOAD);
break;
default:
break;
}
strcpy(netdev->name, "eth%d"); strcpy(netdev->name, "eth%d");
if((err = register_netdev(netdev))) if((err = register_netdev(netdev)))
...@@ -782,8 +874,7 @@ e1000_remove(struct pci_dev *pdev) ...@@ -782,8 +874,7 @@ e1000_remove(struct pci_dev *pdev)
{ {
struct net_device *netdev = pci_get_drvdata(pdev); struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t ctrl_ext; uint32_t manc;
uint32_t manc, swsm;
#ifdef CONFIG_E1000_NAPI #ifdef CONFIG_E1000_NAPI
int i; int i;
#endif #endif
...@@ -799,26 +890,13 @@ e1000_remove(struct pci_dev *pdev) ...@@ -799,26 +890,13 @@ e1000_remove(struct pci_dev *pdev)
} }
} }
switch(adapter->hw.mac_type) { /* Release control of h/w to f/w. If f/w is AMT enabled, this
case e1000_82571: * would have already happened in close and is redundant. */
case e1000_82572: e1000_release_hw_control(adapter);
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
break;
case e1000_82573:
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
swsm & ~E1000_SWSM_DRV_LOAD);
break;
default:
break;
}
unregister_netdev(netdev); unregister_netdev(netdev);
#ifdef CONFIG_E1000_NAPI #ifdef CONFIG_E1000_NAPI
for (i = 0; i < adapter->num_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++)
__dev_put(&adapter->polling_netdev[i]); __dev_put(&adapter->polling_netdev[i]);
#endif #endif
...@@ -923,15 +1001,34 @@ e1000_sw_init(struct e1000_adapter *adapter) ...@@ -923,15 +1001,34 @@ e1000_sw_init(struct e1000_adapter *adapter)
switch (hw->mac_type) { switch (hw->mac_type) {
case e1000_82571: case e1000_82571:
case e1000_82572: case e1000_82572:
adapter->num_queues = 2; /* These controllers support 2 tx queues, but with a single
* qdisc implementation, multiple tx queues aren't quite as
* interesting. If we can find a logical way of mapping
* flows to a queue, then perhaps we can up the num_tx_queue
* count back to its default. Until then, we run the risk of
* terrible performance due to SACK overload. */
adapter->num_tx_queues = 1;
adapter->num_rx_queues = 2;
break; break;
default: default:
adapter->num_queues = 1; adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
break; break;
} }
adapter->num_queues = min(adapter->num_queues, num_online_cpus()); adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
DPRINTK(DRV, INFO, "Multiqueue Enabled: Rx Queue count = %u %s\n",
adapter->num_rx_queues,
((adapter->num_rx_queues == 1)
? ((num_online_cpus() > 1)
? "(due to unsupported feature in current adapter)"
: "(due to unsupported system configuration)")
: ""));
DPRINTK(DRV, INFO, "Multiqueue Enabled: Tx Queue count = %u\n",
adapter->num_tx_queues);
#else #else
adapter->num_queues = 1; adapter->num_tx_queues = 1;
adapter->num_rx_queues = 1;
#endif #endif
if (e1000_alloc_queues(adapter)) { if (e1000_alloc_queues(adapter)) {
...@@ -940,17 +1037,14 @@ e1000_sw_init(struct e1000_adapter *adapter) ...@@ -940,17 +1037,14 @@ e1000_sw_init(struct e1000_adapter *adapter)
} }
#ifdef CONFIG_E1000_NAPI #ifdef CONFIG_E1000_NAPI
for (i = 0; i < adapter->num_queues; i++) { for (i = 0; i < adapter->num_rx_queues; i++) {
adapter->polling_netdev[i].priv = adapter; adapter->polling_netdev[i].priv = adapter;
adapter->polling_netdev[i].poll = &e1000_clean; adapter->polling_netdev[i].poll = &e1000_clean;
adapter->polling_netdev[i].weight = 64; adapter->polling_netdev[i].weight = 64;
dev_hold(&adapter->polling_netdev[i]); dev_hold(&adapter->polling_netdev[i]);
set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state); set_bit(__LINK_STATE_START, &adapter->polling_netdev[i].state);
} }
#endif spin_lock_init(&adapter->tx_queue_lock);
#ifdef CONFIG_E1000_MQ
e1000_setup_queue_mapping(adapter);
#endif #endif
atomic_set(&adapter->irq_sem, 1); atomic_set(&adapter->irq_sem, 1);
...@@ -973,13 +1067,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter) ...@@ -973,13 +1067,13 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
{ {
int size; int size;
size = sizeof(struct e1000_tx_ring) * adapter->num_queues; size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
adapter->tx_ring = kmalloc(size, GFP_KERNEL); adapter->tx_ring = kmalloc(size, GFP_KERNEL);
if (!adapter->tx_ring) if (!adapter->tx_ring)
return -ENOMEM; return -ENOMEM;
memset(adapter->tx_ring, 0, size); memset(adapter->tx_ring, 0, size);
size = sizeof(struct e1000_rx_ring) * adapter->num_queues; size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
adapter->rx_ring = kmalloc(size, GFP_KERNEL); adapter->rx_ring = kmalloc(size, GFP_KERNEL);
if (!adapter->rx_ring) { if (!adapter->rx_ring) {
kfree(adapter->tx_ring); kfree(adapter->tx_ring);
...@@ -988,7 +1082,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter) ...@@ -988,7 +1082,7 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
memset(adapter->rx_ring, 0, size); memset(adapter->rx_ring, 0, size);
#ifdef CONFIG_E1000_NAPI #ifdef CONFIG_E1000_NAPI
size = sizeof(struct net_device) * adapter->num_queues; size = sizeof(struct net_device) * adapter->num_rx_queues;
adapter->polling_netdev = kmalloc(size, GFP_KERNEL); adapter->polling_netdev = kmalloc(size, GFP_KERNEL);
if (!adapter->polling_netdev) { if (!adapter->polling_netdev) {
kfree(adapter->tx_ring); kfree(adapter->tx_ring);
...@@ -998,6 +1092,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter) ...@@ -998,6 +1092,14 @@ e1000_alloc_queues(struct e1000_adapter *adapter)
memset(adapter->polling_netdev, 0, size); memset(adapter->polling_netdev, 0, size);
#endif #endif
#ifdef CONFIG_E1000_MQ
adapter->rx_sched_call_data.func = e1000_rx_schedule;
adapter->rx_sched_call_data.info = adapter->netdev;
adapter->cpu_netdev = alloc_percpu(struct net_device *);
adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
#endif
return E1000_SUCCESS; return E1000_SUCCESS;
} }
...@@ -1017,14 +1119,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter) ...@@ -1017,14 +1119,15 @@ e1000_setup_queue_mapping(struct e1000_adapter *adapter)
lock_cpu_hotplug(); lock_cpu_hotplug();
i = 0; i = 0;
for_each_online_cpu(cpu) { for_each_online_cpu(cpu) {
*per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_queues]; *per_cpu_ptr(adapter->cpu_tx_ring, cpu) = &adapter->tx_ring[i % adapter->num_tx_queues];
/* This is incomplete because we'd like to assign separate /* This is incomplete because we'd like to assign separate
* physical cpus to these netdev polling structures and * physical cpus to these netdev polling structures and
* avoid saturating a subset of cpus. * avoid saturating a subset of cpus.
*/ */
if (i < adapter->num_queues) { if (i < adapter->num_rx_queues) {
*per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i]; *per_cpu_ptr(adapter->cpu_netdev, cpu) = &adapter->polling_netdev[i];
adapter->cpu_for_queue[i] = cpu; adapter->rx_ring[i].cpu = cpu;
cpu_set(cpu, adapter->cpumask);
} else } else
*per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL; *per_cpu_ptr(adapter->cpu_netdev, cpu) = NULL;
...@@ -1071,6 +1174,12 @@ e1000_open(struct net_device *netdev) ...@@ -1071,6 +1174,12 @@ e1000_open(struct net_device *netdev)
e1000_update_mng_vlan(adapter); e1000_update_mng_vlan(adapter);
} }
/* If AMT is enabled, let the firmware know that the network
* interface is now open */
if (adapter->hw.mac_type == e1000_82573 &&
e1000_check_mng_mode(&adapter->hw))
e1000_get_hw_control(adapter);
return E1000_SUCCESS; return E1000_SUCCESS;
err_up: err_up:
...@@ -1109,6 +1218,13 @@ e1000_close(struct net_device *netdev) ...@@ -1109,6 +1218,13 @@ e1000_close(struct net_device *netdev)
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) { E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id); e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
} }
/* If AMT is enabled, let the firmware know that the network
* interface is now closed */
if (adapter->hw.mac_type == e1000_82573 &&
e1000_check_mng_mode(&adapter->hw))
e1000_release_hw_control(adapter);
return 0; return 0;
} }
...@@ -1229,7 +1345,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter) ...@@ -1229,7 +1345,7 @@ e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
{ {
int i, err = 0; int i, err = 0;
for (i = 0; i < adapter->num_queues; i++) { for (i = 0; i < adapter->num_tx_queues; i++) {
err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]); err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
if (err) { if (err) {
DPRINTK(PROBE, ERR, DPRINTK(PROBE, ERR,
...@@ -1254,10 +1370,11 @@ e1000_configure_tx(struct e1000_adapter *adapter) ...@@ -1254,10 +1370,11 @@ e1000_configure_tx(struct e1000_adapter *adapter)
uint64_t tdba; uint64_t tdba;
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
uint32_t tdlen, tctl, tipg, tarc; uint32_t tdlen, tctl, tipg, tarc;
uint32_t ipgr1, ipgr2;
/* Setup the HW Tx Head and Tail descriptor pointers */ /* Setup the HW Tx Head and Tail descriptor pointers */
switch (adapter->num_queues) { switch (adapter->num_tx_queues) {
case 2: case 2:
tdba = adapter->tx_ring[1].dma; tdba = adapter->tx_ring[1].dma;
tdlen = adapter->tx_ring[1].count * tdlen = adapter->tx_ring[1].count *
...@@ -1287,22 +1404,26 @@ e1000_configure_tx(struct e1000_adapter *adapter) ...@@ -1287,22 +1404,26 @@ e1000_configure_tx(struct e1000_adapter *adapter)
/* Set the default values for the Tx Inter Packet Gap timer */ /* Set the default values for the Tx Inter Packet Gap timer */
if (hw->media_type == e1000_media_type_fiber ||
hw->media_type == e1000_media_type_internal_serdes)
tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
else
tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
switch (hw->mac_type) { switch (hw->mac_type) {
case e1000_82542_rev2_0: case e1000_82542_rev2_0:
case e1000_82542_rev2_1: case e1000_82542_rev2_1:
tipg = DEFAULT_82542_TIPG_IPGT; tipg = DEFAULT_82542_TIPG_IPGT;
tipg |= DEFAULT_82542_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT; ipgr1 = DEFAULT_82542_TIPG_IPGR1;
tipg |= DEFAULT_82542_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT; ipgr2 = DEFAULT_82542_TIPG_IPGR2;
break; break;
default: default:
if (hw->media_type == e1000_media_type_fiber || ipgr1 = DEFAULT_82543_TIPG_IPGR1;
hw->media_type == e1000_media_type_internal_serdes) ipgr2 = DEFAULT_82543_TIPG_IPGR2;
tipg = DEFAULT_82543_TIPG_IPGT_FIBER; break;
else
tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
tipg |= DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT;
tipg |= DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT;
} }
tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
E1000_WRITE_REG(hw, TIPG, tipg); E1000_WRITE_REG(hw, TIPG, tipg);
/* Set the Tx Interrupt Delay register */ /* Set the Tx Interrupt Delay register */
...@@ -1454,6 +1575,8 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter, ...@@ -1454,6 +1575,8 @@ e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->next_to_clean = 0; rxdr->next_to_clean = 0;
rxdr->next_to_use = 0; rxdr->next_to_use = 0;
rxdr->rx_skb_top = NULL;
rxdr->rx_skb_prev = NULL;
return 0; return 0;
} }
...@@ -1475,7 +1598,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter) ...@@ -1475,7 +1598,7 @@ e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
{ {
int i, err = 0; int i, err = 0;
for (i = 0; i < adapter->num_queues; i++) { for (i = 0; i < adapter->num_rx_queues; i++) {
err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]); err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
if (err) { if (err) {
DPRINTK(PROBE, ERR, DPRINTK(PROBE, ERR,
...@@ -1510,7 +1633,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter) ...@@ -1510,7 +1633,10 @@ e1000_setup_rctl(struct e1000_adapter *adapter)
E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF | E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT); (adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
if(adapter->hw.tbi_compatibility_on == 1) if (adapter->hw.mac_type > e1000_82543)
rctl |= E1000_RCTL_SECRC;
if (adapter->hw.tbi_compatibility_on == 1)
rctl |= E1000_RCTL_SBP; rctl |= E1000_RCTL_SBP;
else else
rctl &= ~E1000_RCTL_SBP; rctl &= ~E1000_RCTL_SBP;
...@@ -1638,16 +1764,21 @@ e1000_configure_rx(struct e1000_adapter *adapter) ...@@ -1638,16 +1764,21 @@ e1000_configure_rx(struct e1000_adapter *adapter)
} }
if (hw->mac_type >= e1000_82571) { if (hw->mac_type >= e1000_82571) {
/* Reset delay timers after every interrupt */
ctrl_ext = E1000_READ_REG(hw, CTRL_EXT); ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
/* Reset delay timers after every interrupt */
ctrl_ext |= E1000_CTRL_EXT_CANC; ctrl_ext |= E1000_CTRL_EXT_CANC;
#ifdef CONFIG_E1000_NAPI
/* Auto-Mask interrupts upon ICR read. */
ctrl_ext |= E1000_CTRL_EXT_IAME;
#endif
E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext); E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
E1000_WRITE_REG(hw, IAM, ~0);
E1000_WRITE_FLUSH(hw); E1000_WRITE_FLUSH(hw);
} }
/* Setup the HW Rx Head and Tail Descriptor Pointers and /* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */ * the Base and Length of the Rx Descriptor Ring */
switch (adapter->num_queues) { switch (adapter->num_rx_queues) {
#ifdef CONFIG_E1000_MQ #ifdef CONFIG_E1000_MQ
case 2: case 2:
rdba = adapter->rx_ring[1].dma; rdba = adapter->rx_ring[1].dma;
...@@ -1674,7 +1805,7 @@ e1000_configure_rx(struct e1000_adapter *adapter) ...@@ -1674,7 +1805,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
} }
#ifdef CONFIG_E1000_MQ #ifdef CONFIG_E1000_MQ
if (adapter->num_queues > 1) { if (adapter->num_rx_queues > 1) {
uint32_t random[10]; uint32_t random[10];
get_random_bytes(&random[0], 40); get_random_bytes(&random[0], 40);
...@@ -1684,7 +1815,7 @@ e1000_configure_rx(struct e1000_adapter *adapter) ...@@ -1684,7 +1815,7 @@ e1000_configure_rx(struct e1000_adapter *adapter)
E1000_WRITE_REG(hw, RSSIM, 0); E1000_WRITE_REG(hw, RSSIM, 0);
} }
switch (adapter->num_queues) { switch (adapter->num_rx_queues) {
case 2: case 2:
default: default:
reta = 0x00800080; reta = 0x00800080;
...@@ -1776,7 +1907,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter) ...@@ -1776,7 +1907,7 @@ e1000_free_all_tx_resources(struct e1000_adapter *adapter)
{ {
int i; int i;
for (i = 0; i < adapter->num_queues; i++) for (i = 0; i < adapter->num_tx_queues; i++)
e1000_free_tx_resources(adapter, &adapter->tx_ring[i]); e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
} }
...@@ -1789,12 +1920,10 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter, ...@@ -1789,12 +1920,10 @@ e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
buffer_info->dma, buffer_info->dma,
buffer_info->length, buffer_info->length,
PCI_DMA_TODEVICE); PCI_DMA_TODEVICE);
buffer_info->dma = 0;
} }
if(buffer_info->skb) { if (buffer_info->skb)
dev_kfree_skb_any(buffer_info->skb); dev_kfree_skb_any(buffer_info->skb);
buffer_info->skb = NULL; memset(buffer_info, 0, sizeof(struct e1000_buffer));
}
} }
/** /**
...@@ -1843,7 +1972,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter) ...@@ -1843,7 +1972,7 @@ e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
{ {
int i; int i;
for (i = 0; i < adapter->num_queues; i++) for (i = 0; i < adapter->num_tx_queues; i++)
e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]); e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
} }
...@@ -1887,7 +2016,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter) ...@@ -1887,7 +2016,7 @@ e1000_free_all_rx_resources(struct e1000_adapter *adapter)
{ {
int i; int i;
for (i = 0; i < adapter->num_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++)
e1000_free_rx_resources(adapter, &adapter->rx_ring[i]); e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
} }
...@@ -1913,8 +2042,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, ...@@ -1913,8 +2042,6 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
for(i = 0; i < rx_ring->count; i++) { for(i = 0; i < rx_ring->count; i++) {
buffer_info = &rx_ring->buffer_info[i]; buffer_info = &rx_ring->buffer_info[i];
if(buffer_info->skb) { if(buffer_info->skb) {
ps_page = &rx_ring->ps_page[i];
ps_page_dma = &rx_ring->ps_page_dma[i];
pci_unmap_single(pdev, pci_unmap_single(pdev,
buffer_info->dma, buffer_info->dma,
buffer_info->length, buffer_info->length,
...@@ -1922,19 +2049,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter, ...@@ -1922,19 +2049,30 @@ e1000_clean_rx_ring(struct e1000_adapter *adapter,
dev_kfree_skb(buffer_info->skb); dev_kfree_skb(buffer_info->skb);
buffer_info->skb = NULL; buffer_info->skb = NULL;
}
for(j = 0; j < adapter->rx_ps_pages; j++) { ps_page = &rx_ring->ps_page[i];
if(!ps_page->ps_page[j]) break; ps_page_dma = &rx_ring->ps_page_dma[i];
pci_unmap_single(pdev, for (j = 0; j < adapter->rx_ps_pages; j++) {
ps_page_dma->ps_page_dma[j], if (!ps_page->ps_page[j]) break;
PAGE_SIZE, PCI_DMA_FROMDEVICE); pci_unmap_page(pdev,
ps_page_dma->ps_page_dma[j] = 0; ps_page_dma->ps_page_dma[j],
put_page(ps_page->ps_page[j]); PAGE_SIZE, PCI_DMA_FROMDEVICE);
ps_page->ps_page[j] = NULL; ps_page_dma->ps_page_dma[j] = 0;
} put_page(ps_page->ps_page[j]);
ps_page->ps_page[j] = NULL;
} }
} }
/* there also may be some cached data in our adapter */
if (rx_ring->rx_skb_top) {
dev_kfree_skb(rx_ring->rx_skb_top);
/* rx_skb_prev will be wiped out by rx_skb_top */
rx_ring->rx_skb_top = NULL;
rx_ring->rx_skb_prev = NULL;
}
size = sizeof(struct e1000_buffer) * rx_ring->count; size = sizeof(struct e1000_buffer) * rx_ring->count;
memset(rx_ring->buffer_info, 0, size); memset(rx_ring->buffer_info, 0, size);
size = sizeof(struct e1000_ps_page) * rx_ring->count; size = sizeof(struct e1000_ps_page) * rx_ring->count;
...@@ -1963,7 +2101,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter) ...@@ -1963,7 +2101,7 @@ e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
{ {
int i; int i;
for (i = 0; i < adapter->num_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++)
e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]); e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
} }
...@@ -2005,7 +2143,9 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter) ...@@ -2005,7 +2143,9 @@ e1000_leave_82542_rst(struct e1000_adapter *adapter)
if(netif_running(netdev)) { if(netif_running(netdev)) {
e1000_configure_rx(adapter); e1000_configure_rx(adapter);
e1000_alloc_rx_buffers(adapter, &adapter->rx_ring[0]); /* No need to loop, because 82542 supports only 1 queue */
struct e1000_rx_ring *ring = &adapter->rx_ring[0];
adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
} }
} }
...@@ -2204,7 +2344,7 @@ static void ...@@ -2204,7 +2344,7 @@ static void
e1000_watchdog_task(struct e1000_adapter *adapter) e1000_watchdog_task(struct e1000_adapter *adapter)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct e1000_tx_ring *txdr = &adapter->tx_ring[0]; struct e1000_tx_ring *txdr = adapter->tx_ring;
uint32_t link; uint32_t link;
e1000_check_for_link(&adapter->hw); e1000_check_for_link(&adapter->hw);
...@@ -2231,6 +2371,21 @@ e1000_watchdog_task(struct e1000_adapter *adapter) ...@@ -2231,6 +2371,21 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
adapter->link_duplex == FULL_DUPLEX ? adapter->link_duplex == FULL_DUPLEX ?
"Full Duplex" : "Half Duplex"); "Full Duplex" : "Half Duplex");
/* tweak tx_queue_len according to speed/duplex */
netdev->tx_queue_len = adapter->tx_queue_len;
adapter->tx_timeout_factor = 1;
if (adapter->link_duplex == HALF_DUPLEX) {
switch (adapter->link_speed) {
case SPEED_10:
netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 8;
break;
case SPEED_100:
netdev->tx_queue_len = 100;
break;
}
}
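/* With the values above, the Tx-hang check later in this patch compares a
 * buffer's age against tx_timeout_factor * HZ, so at 10 Mb half duplex the
 * hang threshold stretches from roughly 1 second to roughly 8 seconds,
 * while the default factor of 1 keeps the old behaviour elsewhere. */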
netif_carrier_on(netdev); netif_carrier_on(netdev);
netif_wake_queue(netdev); netif_wake_queue(netdev);
mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ); mod_timer(&adapter->phy_info_timer, jiffies + 2 * HZ);
...@@ -2263,7 +2418,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter) ...@@ -2263,7 +2418,10 @@ e1000_watchdog_task(struct e1000_adapter *adapter)
e1000_update_adaptive(&adapter->hw); e1000_update_adaptive(&adapter->hw);
if (adapter->num_queues == 1 && !netif_carrier_ok(netdev)) { #ifdef CONFIG_E1000_MQ
txdr = *per_cpu_ptr(adapter->cpu_tx_ring, smp_processor_id());
#endif
if (!netif_carrier_ok(netdev)) {
if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) { if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
/* We've lost link, so the controller stops DMA, /* We've lost link, so the controller stops DMA,
* but we've got queued Tx work that's never going * but we've got queued Tx work that's never going
...@@ -2314,6 +2472,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, ...@@ -2314,6 +2472,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
{ {
#ifdef NETIF_F_TSO #ifdef NETIF_F_TSO
struct e1000_context_desc *context_desc; struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
unsigned int i; unsigned int i;
uint32_t cmd_length = 0; uint32_t cmd_length = 0;
uint16_t ipcse = 0, tucse, mss; uint16_t ipcse = 0, tucse, mss;
...@@ -2363,6 +2522,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, ...@@ -2363,6 +2522,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
i = tx_ring->next_to_use; i = tx_ring->next_to_use;
context_desc = E1000_CONTEXT_DESC(*tx_ring, i); context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
buffer_info = &tx_ring->buffer_info[i];
context_desc->lower_setup.ip_fields.ipcss = ipcss; context_desc->lower_setup.ip_fields.ipcss = ipcss;
context_desc->lower_setup.ip_fields.ipcso = ipcso; context_desc->lower_setup.ip_fields.ipcso = ipcso;
...@@ -2374,14 +2534,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, ...@@ -2374,14 +2534,16 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
context_desc->tcp_seg_setup.fields.hdr_len = hdr_len; context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
context_desc->cmd_and_length = cpu_to_le32(cmd_length); context_desc->cmd_and_length = cpu_to_le32(cmd_length);
buffer_info->time_stamp = jiffies;
if (++i == tx_ring->count) i = 0; if (++i == tx_ring->count) i = 0;
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
return 1; return TRUE;
} }
#endif #endif
return 0; return FALSE;
} }
static inline boolean_t static inline boolean_t
...@@ -2389,6 +2551,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, ...@@ -2389,6 +2551,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct e1000_context_desc *context_desc; struct e1000_context_desc *context_desc;
struct e1000_buffer *buffer_info;
unsigned int i; unsigned int i;
uint8_t css; uint8_t css;
...@@ -2396,6 +2559,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, ...@@ -2396,6 +2559,7 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
css = skb->h.raw - skb->data; css = skb->h.raw - skb->data;
i = tx_ring->next_to_use; i = tx_ring->next_to_use;
buffer_info = &tx_ring->buffer_info[i];
context_desc = E1000_CONTEXT_DESC(*tx_ring, i); context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
context_desc->upper_setup.tcp_fields.tucss = css; context_desc->upper_setup.tcp_fields.tucss = css;
...@@ -2404,6 +2568,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, ...@@ -2404,6 +2568,8 @@ e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
context_desc->tcp_seg_setup.data = 0; context_desc->tcp_seg_setup.data = 0;
context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT); context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
buffer_info->time_stamp = jiffies;
if (unlikely(++i == tx_ring->count)) i = 0; if (unlikely(++i == tx_ring->count)) i = 0;
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
...@@ -2688,11 +2854,30 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -2688,11 +2854,30 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
* overrun the FIFO, adjust the max buffer len if mss * overrun the FIFO, adjust the max buffer len if mss
* drops. */ * drops. */
if(mss) { if(mss) {
uint8_t hdr_len;
max_per_txd = min(mss << 2, max_per_txd); max_per_txd = min(mss << 2, max_per_txd);
max_txd_pwr = fls(max_per_txd) - 1; max_txd_pwr = fls(max_per_txd) - 1;
/* TSO Workaround for 82571/2 Controllers -- if skb->data
* points to just header, pull a few bytes of payload from
* frags into skb->data */
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
if (skb->data_len && (hdr_len == (skb->len - skb->data_len)) &&
(adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572)) {
unsigned int pull_size;
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
printk(KERN_ERR "__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return -EFAULT;
}
len = skb->len - skb->data_len;
}
} }
if((mss) || (skb->ip_summed == CHECKSUM_HW)) if((mss) || (skb->ip_summed == CHECKSUM_HW))
/* reserve a descriptor for the offload context */
count++; count++;
count++; count++;
#else #else
...@@ -2726,27 +2911,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) ...@@ -2726,27 +2911,6 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
if(adapter->pcix_82544) if(adapter->pcix_82544)
count += nr_frags; count += nr_frags;
#ifdef NETIF_F_TSO
/* TSO Workaround for 82571/2 Controllers -- if skb->data
* points to just header, pull a few bytes of payload from
* frags into skb->data */
if (skb_shinfo(skb)->tso_size) {
uint8_t hdr_len;
hdr_len = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
if (skb->data_len && (hdr_len < (skb->len - skb->data_len)) &&
(adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572)) {
unsigned int pull_size;
pull_size = min((unsigned int)4, skb->data_len);
if (!__pskb_pull_tail(skb, pull_size)) {
printk(KERN_ERR "__pskb_pull_tail failed.\n");
dev_kfree_skb_any(skb);
return -EFAULT;
}
}
}
#endif
if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) ) if(adapter->hw.tx_pkt_filtering && (adapter->hw.mac_type == e1000_82573) )
e1000_transfer_dhcp_info(adapter, skb); e1000_transfer_dhcp_info(adapter, skb);
...@@ -2833,6 +2997,7 @@ e1000_tx_timeout_task(struct net_device *netdev) ...@@ -2833,6 +2997,7 @@ e1000_tx_timeout_task(struct net_device *netdev)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
e1000_down(adapter); e1000_down(adapter);
e1000_up(adapter); e1000_up(adapter);
} }
...@@ -2850,7 +3015,7 @@ e1000_get_stats(struct net_device *netdev) ...@@ -2850,7 +3015,7 @@ e1000_get_stats(struct net_device *netdev)
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
e1000_update_stats(adapter); /* only return the current stats */
return &adapter->net_stats; return &adapter->net_stats;
} }
...@@ -2871,50 +3036,51 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu) ...@@ -2871,50 +3036,51 @@ e1000_change_mtu(struct net_device *netdev, int new_mtu)
if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) || if((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
(max_frame > MAX_JUMBO_FRAME_SIZE)) { (max_frame > MAX_JUMBO_FRAME_SIZE)) {
DPRINTK(PROBE, ERR, "Invalid MTU setting\n"); DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
return -EINVAL;
}
#define MAX_STD_JUMBO_FRAME_SIZE 9234
/* might want this to be bigger enum check... */
/* 82571 controllers limit jumbo frame size to 10500 bytes */
if ((adapter->hw.mac_type == e1000_82571 ||
adapter->hw.mac_type == e1000_82572) &&
max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "MTU > 9216 bytes not supported "
"on 82571 and 82572 controllers.\n");
return -EINVAL; return -EINVAL;
} }
if(adapter->hw.mac_type == e1000_82573 && /* Adapter-specific max frame size limits. */
max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) { switch (adapter->hw.mac_type) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported " case e1000_82542_rev2_0:
"on 82573\n"); case e1000_82542_rev2_1:
return -EINVAL; case e1000_82573:
} if (max_frame > MAXIMUM_ETHERNET_FRAME_SIZE) {
DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
if(adapter->hw.mac_type > e1000_82547_rev_2) { return -EINVAL;
adapter->rx_buffer_len = max_frame; }
E1000_ROUNDUP(adapter->rx_buffer_len, 1024); break;
} else { case e1000_82571:
if(unlikely((adapter->hw.mac_type < e1000_82543) && case e1000_82572:
(max_frame > MAXIMUM_ETHERNET_FRAME_SIZE))) { #define MAX_STD_JUMBO_FRAME_SIZE 9234
DPRINTK(PROBE, ERR, "Jumbo Frames not supported " if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
"on 82542\n"); DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
return -EINVAL; return -EINVAL;
} else {
if(max_frame <= E1000_RXBUFFER_2048) {
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
} else if(max_frame <= E1000_RXBUFFER_4096) {
adapter->rx_buffer_len = E1000_RXBUFFER_4096;
} else if(max_frame <= E1000_RXBUFFER_8192) {
adapter->rx_buffer_len = E1000_RXBUFFER_8192;
} else if(max_frame <= E1000_RXBUFFER_16384) {
adapter->rx_buffer_len = E1000_RXBUFFER_16384;
}
} }
break;
default:
/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
break;
} }
/* since the driver code now supports splitting a packet across
* multiple descriptors, most of the fifo related limitations on
* jumbo frame traffic have gone away.
* simply use 2k descriptors for everything.
*
* NOTE: dev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
* means we reserve 2 more, this pushes us to allocate from the next
* larger slab size
* i.e. RXBUFFER_2048 --> size-4096 slab */
/* recent hardware supports 1KB granularity */
if (adapter->hw.mac_type > e1000_82547_rev_2) {
adapter->rx_buffer_len =
((max_frame < E1000_RXBUFFER_2048) ?
max_frame : E1000_RXBUFFER_2048);
E1000_ROUNDUP(adapter->rx_buffer_len, 1024);
} else
adapter->rx_buffer_len = E1000_RXBUFFER_2048;
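/* Worked example of the sizing above (assuming E1000_ROUNDUP(len, 1024)
 * rounds len up to the next 1 KB multiple and max_frame is MTU + Ethernet
 * header + FCS):
 *   new_mtu = 1500 -> max_frame = 1518 -> min(1518, 2048) = 1518 -> 2048
 *   new_mtu = 9000 -> max_frame = 9018 -> capped at E1000_RXBUFFER_2048
 * so for standard and jumbo MTUs the post-82547 parts end up with 2 KB
 * receive buffers here and rely on multi-descriptor receives for jumbos. */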
netdev->mtu = new_mtu; netdev->mtu = new_mtu;
if(netif_running(netdev)) { if(netif_running(netdev)) {
...@@ -3037,12 +3203,11 @@ e1000_update_stats(struct e1000_adapter *adapter) ...@@ -3037,12 +3203,11 @@ e1000_update_stats(struct e1000_adapter *adapter)
adapter->net_stats.rx_errors = adapter->stats.rxerrc + adapter->net_stats.rx_errors = adapter->stats.rxerrc +
adapter->stats.crcerrs + adapter->stats.algnerrc + adapter->stats.crcerrs + adapter->stats.algnerrc +
adapter->stats.rlec + adapter->stats.mpc + adapter->stats.rlec + adapter->stats.cexterr;
adapter->stats.cexterr; adapter->net_stats.rx_dropped = 0;
adapter->net_stats.rx_length_errors = adapter->stats.rlec; adapter->net_stats.rx_length_errors = adapter->stats.rlec;
adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs;
adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc;
adapter->net_stats.rx_fifo_errors = adapter->stats.mpc;
adapter->net_stats.rx_missed_errors = adapter->stats.mpc; adapter->net_stats.rx_missed_errors = adapter->stats.mpc;
/* Tx Errors */ /* Tx Errors */
...@@ -3110,12 +3275,24 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) ...@@ -3110,12 +3275,24 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw; struct e1000_hw *hw = &adapter->hw;
uint32_t icr = E1000_READ_REG(hw, ICR); uint32_t icr = E1000_READ_REG(hw, ICR);
#if defined(CONFIG_E1000_NAPI) && defined(CONFIG_E1000_MQ) || !defined(CONFIG_E1000_NAPI) #ifndef CONFIG_E1000_NAPI
int i; int i;
#else
/* Interrupt Auto-Mask...upon reading ICR,
* interrupts are masked. No need for the
* IMC write, but it does mean we should
* account for it ASAP. */
if (likely(hw->mac_type >= e1000_82571))
atomic_inc(&adapter->irq_sem);
#endif #endif
if(unlikely(!icr)) if (unlikely(!icr)) {
#ifdef CONFIG_E1000_NAPI
if (hw->mac_type >= e1000_82571)
e1000_irq_enable(adapter);
#endif
return IRQ_NONE; /* Not our interrupt */ return IRQ_NONE; /* Not our interrupt */
}
if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) { if(unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
hw->get_link_status = 1; hw->get_link_status = 1;
...@@ -3123,19 +3300,19 @@ e1000_intr(int irq, void *data, struct pt_regs *regs) ...@@ -3123,19 +3300,19 @@ e1000_intr(int irq, void *data, struct pt_regs *regs)
} }
#ifdef CONFIG_E1000_NAPI #ifdef CONFIG_E1000_NAPI
atomic_inc(&adapter->irq_sem); if (unlikely(hw->mac_type < e1000_82571)) {
E1000_WRITE_REG(hw, IMC, ~0); atomic_inc(&adapter->irq_sem);
E1000_WRITE_FLUSH(hw); E1000_WRITE_REG(hw, IMC, ~0);
E1000_WRITE_FLUSH(hw);
}
#ifdef CONFIG_E1000_MQ #ifdef CONFIG_E1000_MQ
if (atomic_read(&adapter->rx_sched_call_data.count) == 0) { if (atomic_read(&adapter->rx_sched_call_data.count) == 0) {
cpu_set(adapter->cpu_for_queue[0], /* We must setup the cpumask once count == 0 since
adapter->rx_sched_call_data.cpumask); * each cpu bit is cleared when the work is done. */
for (i = 1; i < adapter->num_queues; i++) { adapter->rx_sched_call_data.cpumask = adapter->cpumask;
cpu_set(adapter->cpu_for_queue[i], atomic_add(adapter->num_rx_queues - 1, &adapter->irq_sem);
adapter->rx_sched_call_data.cpumask); atomic_set(&adapter->rx_sched_call_data.count,
atomic_inc(&adapter->irq_sem); adapter->num_rx_queues);
}
atomic_set(&adapter->rx_sched_call_data.count, i);
smp_call_async_mask(&adapter->rx_sched_call_data); smp_call_async_mask(&adapter->rx_sched_call_data);
} else { } else {
printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count)); printk("call_data.count == %u\n", atomic_read(&adapter->rx_sched_call_data.count));
...@@ -3187,7 +3364,7 @@ e1000_clean(struct net_device *poll_dev, int *budget) ...@@ -3187,7 +3364,7 @@ e1000_clean(struct net_device *poll_dev, int *budget)
{ {
struct e1000_adapter *adapter; struct e1000_adapter *adapter;
int work_to_do = min(*budget, poll_dev->quota); int work_to_do = min(*budget, poll_dev->quota);
int tx_cleaned, i = 0, work_done = 0; int tx_cleaned = 0, i = 0, work_done = 0;
/* Must NOT use netdev_priv macro here. */ /* Must NOT use netdev_priv macro here. */
adapter = poll_dev->priv; adapter = poll_dev->priv;
...@@ -3198,11 +3375,23 @@ e1000_clean(struct net_device *poll_dev, int *budget) ...@@ -3198,11 +3375,23 @@ e1000_clean(struct net_device *poll_dev, int *budget)
while (poll_dev != &adapter->polling_netdev[i]) { while (poll_dev != &adapter->polling_netdev[i]) {
i++; i++;
if (unlikely(i == adapter->num_queues)) if (unlikely(i == adapter->num_rx_queues))
BUG(); BUG();
} }
tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]); if (likely(adapter->num_tx_queues == 1)) {
/* e1000_clean is called per-cpu. This lock protects
* tx_ring[0] from being cleaned by multiple cpus
* simultaneously. A failure obtaining the lock means
* tx_ring[0] is currently being cleaned anyway. */
if (spin_trylock(&adapter->tx_queue_lock)) {
tx_cleaned = e1000_clean_tx_irq(adapter,
&adapter->tx_ring[0]);
spin_unlock(&adapter->tx_queue_lock);
}
} else
tx_cleaned = e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
adapter->clean_rx(adapter, &adapter->rx_ring[i], adapter->clean_rx(adapter, &adapter->rx_ring[i],
&work_done, work_to_do); &work_done, work_to_do);
...@@ -3247,17 +3436,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, ...@@ -3247,17 +3436,19 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
buffer_info = &tx_ring->buffer_info[i]; buffer_info = &tx_ring->buffer_info[i];
cleaned = (i == eop); cleaned = (i == eop);
#ifdef CONFIG_E1000_MQ
tx_ring->tx_stats.bytes += buffer_info->length;
#endif
e1000_unmap_and_free_tx_resource(adapter, buffer_info); e1000_unmap_and_free_tx_resource(adapter, buffer_info);
memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
tx_desc->buffer_addr = 0;
tx_desc->lower.data = 0;
tx_desc->upper.data = 0;
if(unlikely(++i == tx_ring->count)) i = 0; if(unlikely(++i == tx_ring->count)) i = 0;
} }
tx_ring->pkt++; #ifdef CONFIG_E1000_MQ
tx_ring->tx_stats.packets++;
#endif
eop = tx_ring->buffer_info[i].next_to_watch; eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop); eop_desc = E1000_TX_DESC(*tx_ring, eop);
} }
...@@ -3276,32 +3467,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter, ...@@ -3276,32 +3467,31 @@ e1000_clean_tx_irq(struct e1000_adapter *adapter,
/* Detect a transmit hang in hardware, this serializes the /* Detect a transmit hang in hardware, this serializes the
* check with the clearing of time_stamp and movement of i */ * check with the clearing of time_stamp and movement of i */
adapter->detect_tx_hung = FALSE; adapter->detect_tx_hung = FALSE;
if (tx_ring->buffer_info[i].dma && if (tx_ring->buffer_info[eop].dma &&
time_after(jiffies, tx_ring->buffer_info[i].time_stamp + HZ) time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
adapter->tx_timeout_factor * HZ)
&& !(E1000_READ_REG(&adapter->hw, STATUS) & && !(E1000_READ_REG(&adapter->hw, STATUS) &
E1000_STATUS_TXOFF)) { E1000_STATUS_TXOFF)) {
/* detected Tx unit hang */ /* detected Tx unit hang */
i = tx_ring->next_to_clean;
eop = tx_ring->buffer_info[i].next_to_watch;
eop_desc = E1000_TX_DESC(*tx_ring, eop);
DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n" DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
" Tx Queue <%lu>\n"
" TDH <%x>\n" " TDH <%x>\n"
" TDT <%x>\n" " TDT <%x>\n"
" next_to_use <%x>\n" " next_to_use <%x>\n"
" next_to_clean <%x>\n" " next_to_clean <%x>\n"
"buffer_info[next_to_clean]\n" "buffer_info[next_to_clean]\n"
" dma <%llx>\n"
" time_stamp <%lx>\n" " time_stamp <%lx>\n"
" next_to_watch <%x>\n" " next_to_watch <%x>\n"
" jiffies <%lx>\n" " jiffies <%lx>\n"
" next_to_watch.status <%x>\n", " next_to_watch.status <%x>\n",
(unsigned long)((tx_ring - adapter->tx_ring) /
sizeof(struct e1000_tx_ring)),
readl(adapter->hw.hw_addr + tx_ring->tdh), readl(adapter->hw.hw_addr + tx_ring->tdh),
readl(adapter->hw.hw_addr + tx_ring->tdt), readl(adapter->hw.hw_addr + tx_ring->tdt),
tx_ring->next_to_use, tx_ring->next_to_use,
i, tx_ring->next_to_clean,
(unsigned long long)tx_ring->buffer_info[i].dma, tx_ring->buffer_info[eop].time_stamp,
tx_ring->buffer_info[i].time_stamp,
eop, eop,
jiffies, jiffies,
eop_desc->upper.fields.status); eop_desc->upper.fields.status);
...@@ -3386,20 +3576,23 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, ...@@ -3386,20 +3576,23 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
uint32_t length; uint32_t length;
uint8_t last_byte; uint8_t last_byte;
unsigned int i; unsigned int i;
boolean_t cleaned = FALSE; int cleaned_count = 0;
boolean_t cleaned = FALSE, multi_descriptor = FALSE;
i = rx_ring->next_to_clean; i = rx_ring->next_to_clean;
rx_desc = E1000_RX_DESC(*rx_ring, i); rx_desc = E1000_RX_DESC(*rx_ring, i);
while(rx_desc->status & E1000_RXD_STAT_DD) { while(rx_desc->status & E1000_RXD_STAT_DD) {
buffer_info = &rx_ring->buffer_info[i]; buffer_info = &rx_ring->buffer_info[i];
u8 status;
#ifdef CONFIG_E1000_NAPI #ifdef CONFIG_E1000_NAPI
if(*work_done >= work_to_do) if(*work_done >= work_to_do)
break; break;
(*work_done)++; (*work_done)++;
#endif #endif
status = rx_desc->status;
cleaned = TRUE; cleaned = TRUE;
cleaned_count++;
pci_unmap_single(pdev, pci_unmap_single(pdev,
buffer_info->dma, buffer_info->dma,
buffer_info->length, buffer_info->length,
...@@ -3433,18 +3626,40 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, ...@@ -3433,18 +3626,40 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
} }
} }
/* Good Receive */ /* code added for copybreak, this should improve
skb_put(skb, length - ETHERNET_FCS_SIZE); * performance for small packets with large amounts
* of reassembly being done in the stack */
#define E1000_CB_LENGTH 256
if ((length < E1000_CB_LENGTH) &&
!rx_ring->rx_skb_top &&
/* or maybe (status & E1000_RXD_STAT_EOP) && */
!multi_descriptor) {
struct sk_buff *new_skb =
dev_alloc_skb(length + NET_IP_ALIGN);
if (new_skb) {
skb_reserve(new_skb, NET_IP_ALIGN);
new_skb->dev = netdev;
memcpy(new_skb->data - NET_IP_ALIGN,
skb->data - NET_IP_ALIGN,
length + NET_IP_ALIGN);
/* save the skb in buffer_info as good */
buffer_info->skb = skb;
skb = new_skb;
skb_put(skb, length);
}
}
/* end copybreak code */
/* Receive Checksum Offload */ /* Receive Checksum Offload */
e1000_rx_checksum(adapter, e1000_rx_checksum(adapter,
(uint32_t)(rx_desc->status) | (uint32_t)(status) |
((uint32_t)(rx_desc->errors) << 24), ((uint32_t)(rx_desc->errors) << 24),
rx_desc->csum, skb); rx_desc->csum, skb);
skb->protocol = eth_type_trans(skb, netdev); skb->protocol = eth_type_trans(skb, netdev);
#ifdef CONFIG_E1000_NAPI #ifdef CONFIG_E1000_NAPI
if(unlikely(adapter->vlgrp && if(unlikely(adapter->vlgrp &&
(rx_desc->status & E1000_RXD_STAT_VP))) { (status & E1000_RXD_STAT_VP))) {
vlan_hwaccel_receive_skb(skb, adapter->vlgrp, vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
le16_to_cpu(rx_desc->special) & le16_to_cpu(rx_desc->special) &
E1000_RXD_SPC_VLAN_MASK); E1000_RXD_SPC_VLAN_MASK);
...@@ -3462,17 +3677,26 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter, ...@@ -3462,17 +3677,26 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
} }
#endif /* CONFIG_E1000_NAPI */ #endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies; netdev->last_rx = jiffies;
rx_ring->pkt++; #ifdef CONFIG_E1000_MQ
rx_ring->rx_stats.packets++;
rx_ring->rx_stats.bytes += length;
#endif
next_desc: next_desc:
rx_desc->status = 0; rx_desc->status = 0;
buffer_info->skb = NULL;
if(unlikely(++i == rx_ring->count)) i = 0;
rx_desc = E1000_RX_DESC(*rx_ring, i); /* return some buffers to hardware, one at a time is too slow */
if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
cleaned_count = 0;
}
} }
rx_ring->next_to_clean = i; rx_ring->next_to_clean = i;
adapter->alloc_rx_buf(adapter, rx_ring);
cleaned_count = E1000_DESC_UNUSED(rx_ring);
if (cleaned_count)
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
return cleaned; return cleaned;
} }
...@@ -3501,6 +3725,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, ...@@ -3501,6 +3725,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct sk_buff *skb; struct sk_buff *skb;
unsigned int i, j; unsigned int i, j;
uint32_t length, staterr; uint32_t length, staterr;
int cleaned_count = 0;
boolean_t cleaned = FALSE; boolean_t cleaned = FALSE;
i = rx_ring->next_to_clean; i = rx_ring->next_to_clean;
...@@ -3517,6 +3742,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, ...@@ -3517,6 +3742,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
(*work_done)++; (*work_done)++;
#endif #endif
cleaned = TRUE; cleaned = TRUE;
cleaned_count++;
pci_unmap_single(pdev, buffer_info->dma, pci_unmap_single(pdev, buffer_info->dma,
buffer_info->length, buffer_info->length,
PCI_DMA_FROMDEVICE); PCI_DMA_FROMDEVICE);
...@@ -3593,18 +3819,28 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, ...@@ -3593,18 +3819,28 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
} }
#endif /* CONFIG_E1000_NAPI */ #endif /* CONFIG_E1000_NAPI */
netdev->last_rx = jiffies; netdev->last_rx = jiffies;
rx_ring->pkt++; #ifdef CONFIG_E1000_MQ
rx_ring->rx_stats.packets++;
rx_ring->rx_stats.bytes += length;
#endif
next_desc: next_desc:
rx_desc->wb.middle.status_error &= ~0xFF; rx_desc->wb.middle.status_error &= ~0xFF;
buffer_info->skb = NULL; buffer_info->skb = NULL;
if(unlikely(++i == rx_ring->count)) i = 0;
rx_desc = E1000_RX_DESC_PS(*rx_ring, i); /* return some buffers to hardware, one at a time is too slow */
if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
cleaned_count = 0;
}
staterr = le32_to_cpu(rx_desc->wb.middle.status_error); staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
} }
rx_ring->next_to_clean = i; rx_ring->next_to_clean = i;
adapter->alloc_rx_buf(adapter, rx_ring);
cleaned_count = E1000_DESC_UNUSED(rx_ring);
if (cleaned_count)
adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
return cleaned; return cleaned;
} }
...@@ -3616,7 +3852,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter, ...@@ -3616,7 +3852,8 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
static void static void
e1000_alloc_rx_buffers(struct e1000_adapter *adapter, e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring) struct e1000_rx_ring *rx_ring,
int cleaned_count)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev; struct pci_dev *pdev = adapter->pdev;
...@@ -3629,11 +3866,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, ...@@ -3629,11 +3866,18 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
i = rx_ring->next_to_use; i = rx_ring->next_to_use;
buffer_info = &rx_ring->buffer_info[i]; buffer_info = &rx_ring->buffer_info[i];
while(!buffer_info->skb) { while (cleaned_count--) {
skb = dev_alloc_skb(bufsz); if (!(skb = buffer_info->skb))
skb = dev_alloc_skb(bufsz);
else {
skb_trim(skb, 0);
goto map_skb;
}
if(unlikely(!skb)) { if(unlikely(!skb)) {
/* Better luck next round */ /* Better luck next round */
adapter->alloc_rx_buff_failed++;
break; break;
} }
...@@ -3670,6 +3914,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, ...@@ -3670,6 +3914,7 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
buffer_info->skb = skb; buffer_info->skb = skb;
buffer_info->length = adapter->rx_buffer_len; buffer_info->length = adapter->rx_buffer_len;
map_skb:
buffer_info->dma = pci_map_single(pdev, buffer_info->dma = pci_map_single(pdev,
skb->data, skb->data,
adapter->rx_buffer_len, adapter->rx_buffer_len,
...@@ -3718,7 +3963,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter, ...@@ -3718,7 +3963,8 @@ e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
static void static void
e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
struct e1000_rx_ring *rx_ring) struct e1000_rx_ring *rx_ring,
int cleaned_count)
{ {
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev; struct pci_dev *pdev = adapter->pdev;
...@@ -3734,7 +3980,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter, ...@@ -3734,7 +3980,7 @@ e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
ps_page = &rx_ring->ps_page[i]; ps_page = &rx_ring->ps_page[i];
ps_page_dma = &rx_ring->ps_page_dma[i]; ps_page_dma = &rx_ring->ps_page_dma[i];
while(!buffer_info->skb) { while (cleaned_count--) {
rx_desc = E1000_RX_DESC_PS(*rx_ring, i); rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
for(j = 0; j < PS_PAGE_BUFFERS; j++) { for(j = 0; j < PS_PAGE_BUFFERS; j++) {
...@@ -4106,8 +4352,12 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid) ...@@ -4106,8 +4352,12 @@ e1000_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
if((adapter->hw.mng_cookie.status & if((adapter->hw.mng_cookie.status &
E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) && E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
(vid == adapter->mng_vlan_id)) (vid == adapter->mng_vlan_id)) {
/* release control to f/w */
e1000_release_hw_control(adapter);
return; return;
}
/* remove VID from filter table */ /* remove VID from filter table */
index = (vid >> 5) & 0x7F; index = (vid >> 5) & 0x7F;
vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index); vfta = E1000_READ_REG_ARRAY(&adapter->hw, VFTA, index);
...@@ -4173,8 +4423,9 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t ctrl, ctrl_ext, rctl, manc, status, swsm; uint32_t ctrl, ctrl_ext, rctl, manc, status;
uint32_t wufc = adapter->wol;
int retval = 0;
netif_device_detach(netdev);
...@@ -4220,13 +4471,21 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
E1000_WRITE_REG(&adapter->hw, WUC, E1000_WUC_PME_EN);
E1000_WRITE_REG(&adapter->hw, WUFC, wufc);
pci_enable_wake(pdev, 3, 1); retval = pci_enable_wake(pdev, PCI_D3hot, 1);
pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ if (retval)
DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
retval = pci_enable_wake(pdev, PCI_D3cold, 1);
if (retval)
DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
} else {
E1000_WRITE_REG(&adapter->hw, WUC, 0);
E1000_WRITE_REG(&adapter->hw, WUFC, 0);
pci_enable_wake(pdev, 3, 0); retval = pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, 4, 0); /* 4 == D3 cold */ if (retval)
DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
retval = pci_enable_wake(pdev, PCI_D3cold, 0); /* 4 == D3 cold */
if (retval)
DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
}
pci_save_state(pdev);
...@@ -4237,29 +4496,24 @@ e1000_suspend(struct pci_dev *pdev, pm_message_t state)
if(manc & E1000_MANC_SMBUS_EN) {
manc |= E1000_MANC_ARP_EN;
E1000_WRITE_REG(&adapter->hw, MANC, manc);
pci_enable_wake(pdev, 3, 1); retval = pci_enable_wake(pdev, PCI_D3hot, 1);
pci_enable_wake(pdev, 4, 1); /* 4 == D3 cold */ if (retval)
DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
retval = pci_enable_wake(pdev, PCI_D3cold, 1);
if (retval)
DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
}
}
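The enable-wake-then-log sequence above repeats three times; a sketch of the same pattern factored into a helper (illustrative only: the driver keeps the calls inline, and the helper name and message text here are assumptions):

static void e1000_set_d3_wake(struct pci_dev *pdev, int enable)
{
	/* Arm (or disarm) PME for both D3hot and D3cold, logging failures */
	if (pci_enable_wake(pdev, PCI_D3hot, enable))
		printk(KERN_ERR "e1000: error setting D3hot wake\n");
	if (pci_enable_wake(pdev, PCI_D3cold, enable))
		printk(KERN_ERR "e1000: error setting D3cold wake\n");
}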
switch(adapter->hw.mac_type) { /* Release control of h/w to f/w. If f/w is AMT enabled, this
case e1000_82571: * would have already happened in close and is redundant. */
case e1000_82572: e1000_release_hw_control(adapter);
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
break;
case e1000_82573:
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
swsm & ~E1000_SWSM_DRV_LOAD);
break;
default:
break;
}
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
retval = pci_set_power_state(pdev, pci_choose_state(pdev, state));
if (retval)
DPRINTK(PROBE, ERR, "Error in setting power state\n");
return 0;
}
...@@ -4269,16 +4523,21 @@ e1000_resume(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct e1000_adapter *adapter = netdev_priv(netdev);
uint32_t manc, ret_val, swsm; int retval;
uint32_t ctrl_ext; uint32_t manc, ret_val;
pci_set_power_state(pdev, PCI_D0); retval = pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev); if (retval)
DPRINTK(PROBE, ERR, "Error in setting power state\n");
ret_val = pci_enable_device(pdev);
pci_set_master(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0); retval = pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0); if (retval)
DPRINTK(PROBE, ERR, "Error enabling D3 wake\n");
retval = pci_enable_wake(pdev, PCI_D3cold, 0);
if (retval)
DPRINTK(PROBE, ERR, "Error enabling D3 cold wake\n");
e1000_reset(adapter);
E1000_WRITE_REG(&adapter->hw, WUS, ~0);
...@@ -4295,21 +4554,13 @@ e1000_resume(struct pci_dev *pdev)
E1000_WRITE_REG(&adapter->hw, MANC, manc);
}
switch(adapter->hw.mac_type) { /* If the controller is 82573 and f/w is AMT, do not set
case e1000_82571: * DRV_LOAD until the interface is up. For all other cases,
case e1000_82572: * let the f/w know that the h/w is now under the control
ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT); * of the driver. */
E1000_WRITE_REG(&adapter->hw, CTRL_EXT, if (adapter->hw.mac_type != e1000_82573 ||
ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); !e1000_check_mng_mode(&adapter->hw))
break; e1000_get_hw_control(adapter);
case e1000_82573:
swsm = E1000_READ_REG(&adapter->hw, SWSM);
E1000_WRITE_REG(&adapter->hw, SWSM,
swsm | E1000_SWSM_DRV_LOAD);
break;
default:
break;
}
return 0;
}
...@@ -4327,6 +4578,9 @@ e1000_netpoll(struct net_device *netdev)
disable_irq(adapter->pdev->irq);
e1000_intr(adapter->pdev->irq, netdev, NULL);
e1000_clean_tx_irq(adapter, adapter->tx_ring);
#ifndef CONFIG_E1000_NAPI
adapter->clean_rx(adapter, adapter->rx_ring);
#endif
enable_irq(adapter->pdev->irq);
}
#endif
...
...@@ -177,7 +177,7 @@ E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
*
* Valid Range: 100-100000 (0=off, 1=dynamic)
*
* Default Value: 1 * Default Value: 8000
*/
E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
...@@ -320,7 +320,7 @@ e1000_check_options(struct e1000_adapter *adapter)
} else {
tx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_queues; i++) for (i = 0; i < adapter->num_tx_queues; i++)
tx_ring[i].count = tx_ring->count;
}
{ /* Receive Descriptor Count */
...@@ -346,7 +346,7 @@ e1000_check_options(struct e1000_adapter *adapter)
} else {
rx_ring->count = opt.def;
}
for (i = 0; i < adapter->num_queues; i++) for (i = 0; i < adapter->num_rx_queues; i++)
rx_ring[i].count = rx_ring->count;
}
{ /* Checksum Offload Enable/Disable */
...@@ -388,7 +388,7 @@ e1000_check_options(struct e1000_adapter *adapter)
e1000_validate_option(&fc, &opt, adapter);
adapter->hw.fc = adapter->hw.original_fc = fc;
} else {
adapter->hw.fc = opt.def; adapter->hw.fc = adapter->hw.original_fc = opt.def;
}
}
{ /* Transmit Interrupt Delay */
...@@ -584,6 +584,12 @@ e1000_check_copper_options(struct e1000_adapter *adapter)
.p = dplx_list }}
};
if (e1000_check_phy_reset_block(&adapter->hw)) {
DPRINTK(PROBE, INFO,
"Link active due to SoL/IDER Session. "
"Speed/Duplex/AutoNeg parameter ignored.\n");
return;
}
if (num_Duplex > bd) {
dplx = Duplex[bd];
e1000_validate_option(&dplx, &opt, adapter);
...
...@@ -35,6 +35,8 @@
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/bitops.h>
#include <linux/delay.h>
...@@ -55,13 +57,15 @@
/* Constants */
#define VLAN_HLEN 4
#define FCS_LEN 4
#define WRAP NET_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN #define DMA_ALIGN 8 /* hw requires 8-byte alignment */
#define HW_IP_ALIGN 2 /* hw aligns IP header */
#define WRAP HW_IP_ALIGN + ETH_HLEN + VLAN_HLEN + FCS_LEN
#define RX_SKB_SIZE ((dev->mtu + WRAP + 7) & ~0x7)
#define INT_CAUSE_UNMASK_ALL 0x0007ffff #define INT_UNMASK_ALL 0x0007ffff
#define INT_CAUSE_UNMASK_ALL_EXT 0x0011ffff #define INT_UNMASK_ALL_EXT 0x0011ffff
#define INT_CAUSE_MASK_ALL 0x00000000 #define INT_MASK_ALL 0x00000000
#define INT_CAUSE_MASK_ALL_EXT 0x00000000 #define INT_MASK_ALL_EXT 0x00000000
#define INT_CAUSE_CHECK_BITS INT_CAUSE_UNMASK_ALL
#define INT_CAUSE_CHECK_BITS_EXT INT_CAUSE_UNMASK_ALL_EXT
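A worked instance of the RX_SKB_SIZE arithmetic above (assuming the usual sizes: ETH_HLEN = 14, plus HW_IP_ALIGN = 2, VLAN_HLEN = 4, FCS_LEN = 4):

/* mtu = 1500: WRAP = 2 + 14 + 4 + 4 = 24
 * RX_SKB_SIZE = (1500 + 24 + 7) & ~0x7 = 1528,
 * i.e. the buffer length rounded up to the next multiple of 8. */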
...@@ -78,8 +82,9 @@
static int eth_port_link_is_up(unsigned int eth_port_num);
static void eth_port_uc_addr_get(struct net_device *dev,
unsigned char *MacAddr);
static int mv643xx_eth_real_open(struct net_device *); static void eth_port_set_multicast_list(struct net_device *);
static int mv643xx_eth_real_stop(struct net_device *); static int mv643xx_eth_open(struct net_device *);
static int mv643xx_eth_stop(struct net_device *);
static int mv643xx_eth_change_mtu(struct net_device *, int);
static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *);
static void eth_port_init_mac_tables(unsigned int eth_port_num);
...@@ -124,15 +129,8 @@ static inline void mv_write(int offset, u32 data)
*/
static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
{
struct mv643xx_private *mp = netdev_priv(dev); if ((new_mtu > 9500) || (new_mtu < 64))
unsigned long flags;
spin_lock_irqsave(&mp->lock, flags);
if ((new_mtu > 9500) || (new_mtu < 64)) {
spin_unlock_irqrestore(&mp->lock, flags);
return -EINVAL;
}
dev->mtu = new_mtu;
/*
...@@ -142,17 +140,13 @@ static int mv643xx_eth_change_mtu(struct net_device *dev, int new_mtu)
* to memory is full, which might fail the open function.
*/
if (netif_running(dev)) {
if (mv643xx_eth_real_stop(dev)) mv643xx_eth_stop(dev);
printk(KERN_ERR if (mv643xx_eth_open(dev))
"%s: Fatal error on stopping device\n",
dev->name);
if (mv643xx_eth_real_open(dev))
printk(KERN_ERR
"%s: Fatal error on opening device\n",
dev->name);
}
spin_unlock_irqrestore(&mp->lock, flags);
return 0;
}
...@@ -170,15 +164,19 @@ static void mv643xx_eth_rx_task(void *data)
struct mv643xx_private *mp = netdev_priv(dev);
struct pkt_info pkt_info;
struct sk_buff *skb;
int unaligned;
if (test_and_set_bit(0, &mp->rx_task_busy))
panic("%s: Error in test_set_bit / clear_bit", dev->name);
while (mp->rx_ring_skbs < (mp->rx_ring_size - 5)) {
skb = dev_alloc_skb(RX_SKB_SIZE); skb = dev_alloc_skb(RX_SKB_SIZE + DMA_ALIGN);
if (!skb)
break;
mp->rx_ring_skbs++;
unaligned = (u32)skb->data & (DMA_ALIGN - 1);
if (unaligned)
skb_reserve(skb, DMA_ALIGN - unaligned);
pkt_info.cmd_sts = ETH_RX_ENABLE_INTERRUPT;
pkt_info.byte_cnt = RX_SKB_SIZE;
pkt_info.buf_ptr = dma_map_single(NULL, skb->data, RX_SKB_SIZE,
...@@ -189,7 +187,7 @@ static void mv643xx_eth_rx_task(void *data)
"%s: Error allocating RX Ring\n", dev->name);
break;
}
skb_reserve(skb, 2); skb_reserve(skb, HW_IP_ALIGN);
}
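The alignment fixup above in isolation, as a minimal sketch (size is an illustrative variable; the constant 8 mirrors DMA_ALIGN):

/* Over-allocate by the alignment, then reserve the misaligned remainder
 * so that skb->data starts on an 8-byte boundary before DMA mapping. */
struct sk_buff *skb = dev_alloc_skb(size + 8);
if (skb) {
	unsigned int off = (u32)skb->data & 7;
	if (off)
		skb_reserve(skb, 8 - off);
}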
clear_bit(0, &mp->rx_task_busy);
/*
...@@ -207,7 +205,7 @@ static void mv643xx_eth_rx_task(void *data)
else {
/* Return interrupts */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(mp->port_num),
INT_CAUSE_UNMASK_ALL); INT_UNMASK_ALL);
}
#endif
}
...@@ -267,6 +265,8 @@ static void mv643xx_eth_set_rx_mode(struct net_device *dev)
mp->port_config &= ~(u32) MV643XX_ETH_UNICAST_PROMISCUOUS_MODE;
mv_write(MV643XX_ETH_PORT_CONFIG_REG(mp->port_num), mp->port_config);
eth_port_set_multicast_list(dev);
}
/*
...@@ -342,8 +342,6 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
if (!(eth_int_cause_ext & (BIT0 | BIT8)))
return released;
spin_lock(&mp->lock);
/* Check only queue 0 */
while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
if (pkt_info.cmd_sts & BIT0) {
...@@ -351,31 +349,21 @@ static int mv643xx_eth_free_tx_queue(struct net_device *dev,
stats->tx_errors++;
}
/* if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
* If return_info is different than 0, release the skb. dma_unmap_single(NULL, pkt_info.buf_ptr,
* The case where return_info is not 0 is only in case pkt_info.byte_cnt,
* when transmitted a scatter/gather packet, where only DMA_TO_DEVICE);
* last skb releases the whole chain. else
*/ dma_unmap_page(NULL, pkt_info.buf_ptr,
if (pkt_info.return_info) { pkt_info.byte_cnt,
if (skb_shinfo(pkt_info.return_info)->nr_frags) DMA_TO_DEVICE);
dma_unmap_page(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt,
DMA_TO_DEVICE);
else
dma_unmap_single(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt,
DMA_TO_DEVICE);
if (pkt_info.return_info) {
dev_kfree_skb_irq(pkt_info.return_info);
released = 0;
} else }
dma_unmap_page(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt, DMA_TO_DEVICE);
}
spin_unlock(&mp->lock);
return released;
}
...@@ -482,12 +470,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
/* Read interrupt cause registers */
eth_int_cause = mv_read(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num)) &
INT_CAUSE_UNMASK_ALL; INT_UNMASK_ALL;
if (eth_int_cause & BIT1)
eth_int_cause_ext = mv_read(
MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num)) &
INT_CAUSE_UNMASK_ALL_EXT; INT_UNMASK_ALL_EXT;
#ifdef MV643XX_NAPI
if (!(eth_int_cause & 0x0007fffd)) {
...@@ -512,9 +500,10 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
} else {
if (netif_rx_schedule_prep(dev)) {
/* Mask all the interrupts */ /* Mask all the interrupts */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0); mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG INT_MASK_ALL);
(port_num), 0); /* wait for previous write to complete */
mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
__netif_rx_schedule(dev);
}
#else
...@@ -527,9 +516,12 @@ static irqreturn_t mv643xx_eth_int_handler(int irq, void *dev_id,
* with skb's.
*/
#ifdef MV643XX_RX_QUEUE_FILL_ON_TASK
/* Unmask all interrupts on ethernet port */ /* Mask all interrupts on ethernet port */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
INT_CAUSE_MASK_ALL); INT_MASK_ALL);
/* wait for previous write to take effect */
mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
queue_task(&mp->rx_task, &tq_immediate);
mark_bh(IMMEDIATE_BH);
#else
...@@ -635,56 +627,6 @@ static unsigned int eth_port_set_tx_coal(unsigned int eth_port_num,
return coal;
}
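The read-back after the mask write above is the usual posted-write flush; in isolation the pattern looks like this (a sketch using only the register accessors already defined in this driver):

/* On a bus with posted writes, the mask write may still be in flight
 * when the CPU moves on; reading the same register back forces it to
 * complete before the next step relies on interrupts being masked. */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));	/* flush */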
/*
* mv643xx_eth_open
*
* This function is called when opening the network device. The function
* should initialize all the hardware, initialize cyclic Rx/Tx
* descriptors chain and buffers and allocate an IRQ to the network
* device.
*
* Input : a pointer to the network device structure
*
* Output : zero on success, nonzero on failure.
*/
static int mv643xx_eth_open(struct net_device *dev)
{
struct mv643xx_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
int err;
spin_lock_irq(&mp->lock);
err = request_irq(dev->irq, mv643xx_eth_int_handler,
SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
if (err) {
printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
port_num);
err = -EAGAIN;
goto out;
}
if (mv643xx_eth_real_open(dev)) {
printk("%s: Error opening interface\n", dev->name);
err = -EBUSY;
goto out_free;
}
spin_unlock_irq(&mp->lock);
return 0;
out_free:
free_irq(dev->irq, dev);
out:
spin_unlock_irq(&mp->lock);
return err;
}
/*
* ether_init_rx_desc_ring - Carve a Rx chain desc list and buffer in memory.
*
...@@ -777,28 +719,37 @@ static void ether_init_tx_desc_ring(struct mv643xx_private *mp)
mp->port_tx_queue_command |= 1;
}
/* Helper function for mv643xx_eth_open */ /*
static int mv643xx_eth_real_open(struct net_device *dev) * mv643xx_eth_open
*
* This function is called when opening the network device. The function
* should initialize all the hardware, initialize cyclic Rx/Tx
* descriptors chain and buffers and allocate an IRQ to the network
* device.
*
* Input : a pointer to the network device structure
*
* Output : zero on success, nonzero on failure.
*/
static int mv643xx_eth_open(struct net_device *dev)
{
struct mv643xx_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
unsigned int size;
int err;
err = request_irq(dev->irq, mv643xx_eth_int_handler,
SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
if (err) {
printk(KERN_ERR "Can not assign IRQ number to MV643XX_eth%d\n",
port_num);
return -EAGAIN;
}
/* Stop RX Queues */
mv_write(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
/* Clear the ethernet port interrupts */
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
/* Unmask RX buffer and TX end interrupt */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL);
/* Unmask phy and link status changes interrupts */
mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL_EXT);
/* Set the MAC Address */
memcpy(mp->port_mac_addr, dev->dev_addr, 6);
...@@ -818,14 +769,15 @@ static int mv643xx_eth_real_open(struct net_device *dev)
GFP_KERNEL);
if (!mp->rx_skb) {
printk(KERN_ERR "%s: Cannot allocate Rx skb ring\n", dev->name);
return -ENOMEM; err = -ENOMEM;
goto out_free_irq;
}
mp->tx_skb = kmalloc(sizeof(*mp->tx_skb) * mp->tx_ring_size,
GFP_KERNEL);
if (!mp->tx_skb) {
printk(KERN_ERR "%s: Cannot allocate Tx skb ring\n", dev->name);
kfree(mp->rx_skb); err = -ENOMEM;
return -ENOMEM; goto out_free_rx_skb;
}
/* Allocate TX ring */
...@@ -845,9 +797,8 @@ static int mv643xx_eth_real_open(struct net_device *dev)
if (!mp->p_tx_desc_area) {
printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
dev->name, size);
kfree(mp->rx_skb); err = -ENOMEM;
kfree(mp->tx_skb); goto out_free_tx_skb;
return -ENOMEM;
}
BUG_ON((u32) mp->p_tx_desc_area & 0xf); /* check 16-byte alignment */
memset((void *)mp->p_tx_desc_area, 0, mp->tx_desc_area_size);
...@@ -874,13 +825,12 @@ static int mv643xx_eth_real_open(struct net_device *dev)
printk(KERN_ERR "%s: Freeing previously allocated TX queues...",
dev->name);
if (mp->rx_sram_size)
iounmap(mp->p_rx_desc_area); iounmap(mp->p_tx_desc_area);
else
dma_free_coherent(NULL, mp->tx_desc_area_size,
mp->p_tx_desc_area, mp->tx_desc_dma);
kfree(mp->rx_skb); err = -ENOMEM;
kfree(mp->tx_skb); goto out_free_tx_skb;
return -ENOMEM;
}
memset((void *)mp->p_rx_desc_area, 0, size);
...@@ -900,9 +850,26 @@ static int mv643xx_eth_real_open(struct net_device *dev)
mp->tx_int_coal =
eth_port_set_tx_coal(port_num, 133000000, MV643XX_TX_COAL);
netif_start_queue(dev); /* Clear any pending ethernet port interrupts */
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
/* Unmask phy and link status changes interrupts */
mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
INT_UNMASK_ALL_EXT);
/* Unmask RX buffer and TX end interrupt */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
return 0;
out_free_tx_skb:
kfree(mp->tx_skb);
out_free_rx_skb:
kfree(mp->rx_skb);
out_free_irq:
free_irq(dev->irq, dev);
return err;
}
static void mv643xx_eth_free_tx_rings(struct net_device *dev)
...@@ -910,14 +877,17 @@ static void mv643xx_eth_free_tx_rings(struct net_device *dev)
struct mv643xx_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
unsigned int curr;
struct sk_buff *skb;
/* Stop Tx Queues */
mv_write(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_REG(port_num), 0x0000ff00);
/* Free outstanding skb's on TX rings */
for (curr = 0; mp->tx_ring_skbs && curr < mp->tx_ring_size; curr++) {
if (mp->tx_skb[curr]) { skb = mp->tx_skb[curr];
dev_kfree_skb(mp->tx_skb[curr]); if (skb) {
mp->tx_ring_skbs -= skb_shinfo(skb)->nr_frags;
dev_kfree_skb(skb);
mp->tx_ring_skbs--;
}
}
...@@ -973,44 +943,32 @@ static void mv643xx_eth_free_rx_rings(struct net_device *dev)
* Output : zero if success, nonzero if fails
*/
/* Helper function for mv643xx_eth_stop */ static int mv643xx_eth_stop(struct net_device *dev)
static int mv643xx_eth_real_stop(struct net_device *dev)
{
struct mv643xx_private *mp = netdev_priv(dev);
unsigned int port_num = mp->port_num;
/* Mask all interrupts on ethernet port */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
/* wait for previous write to complete */
mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
#ifdef MV643XX_NAPI
netif_poll_disable(dev);
#endif
netif_carrier_off(dev);
netif_stop_queue(dev);
mv643xx_eth_free_tx_rings(dev);
mv643xx_eth_free_rx_rings(dev);
eth_port_reset(mp->port_num);
/* Disable ethernet port interrupts */ mv643xx_eth_free_tx_rings(dev);
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0); mv643xx_eth_free_rx_rings(dev);
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
/* Mask RX buffer and TX end interrupt */
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), 0);
/* Mask phy and link status changes interrupts */
mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num), 0);
return 0;
}
static int mv643xx_eth_stop(struct net_device *dev)
{
struct mv643xx_private *mp = netdev_priv(dev);
spin_lock_irq(&mp->lock);
mv643xx_eth_real_stop(dev); #ifdef MV643XX_NAPI
netif_poll_enable(dev);
#endif
free_irq(dev->irq, dev);
spin_unlock_irq(&mp->lock);
return 0;
}
...@@ -1022,20 +980,17 @@ static void mv643xx_tx(struct net_device *dev)
struct pkt_info pkt_info;
while (eth_tx_return_desc(mp, &pkt_info) == ETH_OK) {
if (pkt_info.return_info) { if (pkt_info.cmd_sts & ETH_TX_FIRST_DESC)
if (skb_shinfo(pkt_info.return_info)->nr_frags) dma_unmap_single(NULL, pkt_info.buf_ptr,
dma_unmap_page(NULL, pkt_info.buf_ptr, pkt_info.byte_cnt,
pkt_info.byte_cnt, DMA_TO_DEVICE);
DMA_TO_DEVICE); else
else dma_unmap_page(NULL, pkt_info.buf_ptr,
dma_unmap_single(NULL, pkt_info.buf_ptr, pkt_info.byte_cnt,
pkt_info.byte_cnt, DMA_TO_DEVICE);
DMA_TO_DEVICE);
if (pkt_info.return_info)
dev_kfree_skb_irq(pkt_info.return_info);
} else
dma_unmap_page(NULL, pkt_info.buf_ptr,
pkt_info.byte_cnt, DMA_TO_DEVICE);
}
if (netif_queue_stopped(dev) &&
...@@ -1053,14 +1008,11 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
struct mv643xx_private *mp = netdev_priv(dev);
int done = 1, orig_budget, work_done;
unsigned int port_num = mp->port_num;
unsigned long flags;
#ifdef MV643XX_TX_FAST_REFILL
if (++mp->tx_clean_threshold > 5) {
spin_lock_irqsave(&mp->lock, flags);
mv643xx_tx(dev);
mp->tx_clean_threshold = 0;
spin_unlock_irqrestore(&mp->lock, flags);
}
#endif
...@@ -1078,21 +1030,36 @@ static int mv643xx_poll(struct net_device *dev, int *budget)
}
if (done) {
spin_lock_irqsave(&mp->lock, flags); netif_rx_complete(dev);
__netif_rx_complete(dev);
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_REG(port_num), 0);
mv_write(MV643XX_ETH_INTERRUPT_CAUSE_EXTEND_REG(port_num), 0);
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL); INT_UNMASK_ALL);
mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL_EXT);
spin_unlock_irqrestore(&mp->lock, flags);
}
return done ? 0 : 1;
}
#endif
/* Hardware can't handle unaligned fragments smaller than 9 bytes.
* This helper function detects that case.
*/
static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
{
unsigned int frag;
skb_frag_t *fragp;
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
fragp = &skb_shinfo(skb)->frags[frag];
if (fragp->size <= 8 && fragp->page_offset & 0x7)
return 1;
}
return 0;
}
/*
* mv643xx_eth_start_xmit
*
...@@ -1136,12 +1103,19 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
return 1;
}
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
if (has_tiny_unaligned_frags(skb)) {
if ((skb_linearize(skb, GFP_ATOMIC) != 0)) {
stats->tx_dropped++;
printk(KERN_DEBUG "%s: failed to linearize tiny "
"unaligned fragment\n", dev->name);
return 1;
}
}
spin_lock_irqsave(&mp->lock, flags);
/* Update packet info data structure -- DMA owned, first last */
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
if (!skb_shinfo(skb)->nr_frags) {
linear:
if (skb->ip_summed != CHECKSUM_HW) {
/* Errata BTS #50, IHL must be 5 if no HW checksum */
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
...@@ -1150,7 +1124,6 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
5 << ETH_TX_IHL_SHIFT;
pkt_info.l4i_chk = 0;
} else {
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT |
ETH_TX_FIRST_DESC |
ETH_TX_LAST_DESC |
...@@ -1158,14 +1131,16 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
ETH_GEN_IP_V_4_CHECKSUM |
skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
/* CPU already calculated pseudo header checksum. */
if (skb->nh.iph->protocol == IPPROTO_UDP) { if ((skb->protocol == ETH_P_IP) &&
(skb->nh.iph->protocol == IPPROTO_UDP) ) {
pkt_info.cmd_sts |= ETH_UDP_FRAME;
pkt_info.l4i_chk = skb->h.uh->check;
} else if (skb->nh.iph->protocol == IPPROTO_TCP) } else if ((skb->protocol == ETH_P_IP) &&
(skb->nh.iph->protocol == IPPROTO_TCP))
pkt_info.l4i_chk = skb->h.th->check;
else {
printk(KERN_ERR
"%s: chksum proto != TCP or UDP\n", "%s: chksum proto != IPv4 TCP or UDP\n",
dev->name);
spin_unlock_irqrestore(&mp->lock, flags);
return 1;
...@@ -1183,26 +1158,6 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
} else {
unsigned int frag;
/* Since hardware can't handle unaligned fragments smaller
* than 9 bytes, if we find any, we linearize the skb
* and start again. When I've seen it, it's always been
* the first frag (probably near the end of the page),
* but we check all frags to be safe.
*/
for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
skb_frag_t *fragp;
fragp = &skb_shinfo(skb)->frags[frag];
if (fragp->size <= 8 && fragp->page_offset & 0x7) {
skb_linearize(skb, GFP_ATOMIC);
printk(KERN_DEBUG "%s: unaligned tiny fragment"
"%d of %d, fixed\n",
dev->name, frag,
skb_shinfo(skb)->nr_frags);
goto linear;
}
}
/* first frag which is skb header */
pkt_info.byte_cnt = skb_headlen(skb);
pkt_info.buf_ptr = dma_map_single(NULL, skb->data,
...@@ -1221,14 +1176,16 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
ETH_GEN_IP_V_4_CHECKSUM |
skb->nh.iph->ihl << ETH_TX_IHL_SHIFT;
/* CPU already calculated pseudo header checksum. */
if (skb->nh.iph->protocol == IPPROTO_UDP) { if ((skb->protocol == ETH_P_IP) &&
(skb->nh.iph->protocol == IPPROTO_UDP)) {
pkt_info.cmd_sts |= ETH_UDP_FRAME;
pkt_info.l4i_chk = skb->h.uh->check;
} else if (skb->nh.iph->protocol == IPPROTO_TCP) } else if ((skb->protocol == ETH_P_IP) &&
(skb->nh.iph->protocol == IPPROTO_TCP))
pkt_info.l4i_chk = skb->h.th->check;
else {
printk(KERN_ERR
"%s: chksum proto != TCP or UDP\n", "%s: chksum proto != IPv4 TCP or UDP\n",
dev->name);
spin_unlock_irqrestore(&mp->lock, flags);
return 1;
...@@ -1288,6 +1245,8 @@ static int mv643xx_eth_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
}
#else
spin_lock_irqsave(&mp->lock, flags);
pkt_info.cmd_sts = ETH_TX_ENABLE_INTERRUPT | ETH_TX_FIRST_DESC |
ETH_TX_LAST_DESC;
pkt_info.l4i_chk = 0;
...@@ -1340,39 +1299,18 @@ static struct net_device_stats *mv643xx_eth_get_stats(struct net_device *dev)
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void mv643xx_enable_irq(struct mv643xx_private *mp)
{
int port_num = mp->port_num;
unsigned long flags;
spin_lock_irqsave(&mp->lock, flags);
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL);
mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
INT_CAUSE_UNMASK_ALL_EXT);
spin_unlock_irqrestore(&mp->lock, flags);
}
static inline void mv643xx_disable_irq(struct mv643xx_private *mp)
{
int port_num = mp->port_num;
unsigned long flags;
spin_lock_irqsave(&mp->lock, flags);
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num),
INT_CAUSE_MASK_ALL);
mv_write(MV643XX_ETH_INTERRUPT_EXTEND_MASK_REG(port_num),
INT_CAUSE_MASK_ALL_EXT);
spin_unlock_irqrestore(&mp->lock, flags);
}
static void mv643xx_netpoll(struct net_device *netdev)
{
struct mv643xx_private *mp = netdev_priv(netdev);
int port_num = mp->port_num;
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_MASK_ALL);
/* wait for previous write to complete */
mv_read(MV643XX_ETH_INTERRUPT_MASK_REG(port_num));
mv643xx_disable_irq(mp);
mv643xx_eth_int_handler(netdev->irq, netdev, NULL);
mv643xx_enable_irq(mp);
mv_write(MV643XX_ETH_INTERRUPT_MASK_REG(port_num), INT_UNMASK_ALL);
}
#endif
...@@ -1441,7 +1379,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
* Zero copy can only work if we use Discovery II memory. Else, we will
* have to map the buffers to ISA memory which is only 16 MB
*/
dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_HW_CSUM; dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
#endif
#endif
...@@ -2053,6 +1991,196 @@ static int eth_port_uc_addr(unsigned int eth_port_num, unsigned char uc_nibble,
return 1;
}
/*
* The entries in each table are indexed by a hash of a packet's MAC
* address. One bit in each entry determines whether the packet is
* accepted. There are 4 entries (each 8 bits wide) in each register
* of the table. The bits in each entry are defined as follows:
* 0 Accept=1, Drop=0
* 3-1 Queue (ETH_Q0=0)
* 7-4 Reserved = 0;
*/
static void eth_port_set_filter_table_entry(int table, unsigned char entry)
{
unsigned int table_reg;
unsigned int tbl_offset;
unsigned int reg_offset;
tbl_offset = (entry / 4) * 4; /* Register offset of DA table entry */
reg_offset = entry % 4; /* Entry offset within the register */
/* Set "accepts frame bit" at specified table entry */
table_reg = mv_read(table + tbl_offset);
table_reg |= 0x01 << (8 * reg_offset);
mv_write(table + tbl_offset, table_reg);
}
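A worked instance of the indexing in eth_port_set_filter_table_entry() (an illustrative entry value, not from the patch):

/* entry = 42 (0x2A):
 * tbl_offset = (42 / 4) * 4 = 40, the register holding entries 40..43
 * reg_offset = 42 % 4 = 2, the third entry within that register,
 * so the accept bit set is bit (8 * 2) = 16 of that register. */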
/*
* eth_port_mc_addr - Multicast address settings.
*
* The MV device supports multicast using two tables:
* 1) Special Multicast Table for MAC addresses of the form
* 0x01-00-5E-00-00-XX (where XX is between 0x00 and 0x_FF).
* The MAC DA[7:0] bits are used as a pointer to the Special Multicast
* Table entries in the DA-Filter table.
* 2) Other Multicast Table for multicast of another type. A CRC-8bit
* is used as an index to the Other Multicast Table entries in the
* DA-Filter table. This function calculates the CRC-8bit value.
* In either case, eth_port_set_filter_table_entry() is then called
* to set the actual table entry.
*/
static void eth_port_mc_addr(unsigned int eth_port_num, unsigned char *p_addr)
{
unsigned int mac_h;
unsigned int mac_l;
unsigned char crc_result = 0;
int table;
int mac_array[48];
int crc[8];
int i;
if ((p_addr[0] == 0x01) && (p_addr[1] == 0x00) &&
(p_addr[2] == 0x5E) && (p_addr[3] == 0x00) && (p_addr[4] == 0x00)) {
table = MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
(eth_port_num);
eth_port_set_filter_table_entry(table, p_addr[5]);
return;
}
/* Calculate CRC-8 out of the given address */
mac_h = (p_addr[0] << 8) | (p_addr[1]);
mac_l = (p_addr[2] << 24) | (p_addr[3] << 16) |
(p_addr[4] << 8) | (p_addr[5] << 0);
for (i = 0; i < 32; i++)
mac_array[i] = (mac_l >> i) & 0x1;
for (i = 32; i < 48; i++)
mac_array[i] = (mac_h >> (i - 32)) & 0x1;
crc[0] = mac_array[45] ^ mac_array[43] ^ mac_array[40] ^ mac_array[39] ^
mac_array[35] ^ mac_array[34] ^ mac_array[31] ^ mac_array[30] ^
mac_array[28] ^ mac_array[23] ^ mac_array[21] ^ mac_array[19] ^
mac_array[18] ^ mac_array[16] ^ mac_array[14] ^ mac_array[12] ^
mac_array[8] ^ mac_array[7] ^ mac_array[6] ^ mac_array[0];
crc[1] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
mac_array[41] ^ mac_array[39] ^ mac_array[36] ^ mac_array[34] ^
mac_array[32] ^ mac_array[30] ^ mac_array[29] ^ mac_array[28] ^
mac_array[24] ^ mac_array[23] ^ mac_array[22] ^ mac_array[21] ^
mac_array[20] ^ mac_array[18] ^ mac_array[17] ^ mac_array[16] ^
mac_array[15] ^ mac_array[14] ^ mac_array[13] ^ mac_array[12] ^
mac_array[9] ^ mac_array[6] ^ mac_array[1] ^ mac_array[0];
crc[2] = mac_array[47] ^ mac_array[46] ^ mac_array[44] ^ mac_array[43] ^
mac_array[42] ^ mac_array[39] ^ mac_array[37] ^ mac_array[34] ^
mac_array[33] ^ mac_array[29] ^ mac_array[28] ^ mac_array[25] ^
mac_array[24] ^ mac_array[22] ^ mac_array[17] ^ mac_array[15] ^
mac_array[13] ^ mac_array[12] ^ mac_array[10] ^ mac_array[8] ^
mac_array[6] ^ mac_array[2] ^ mac_array[1] ^ mac_array[0];
crc[3] = mac_array[47] ^ mac_array[45] ^ mac_array[44] ^ mac_array[43] ^
mac_array[40] ^ mac_array[38] ^ mac_array[35] ^ mac_array[34] ^
mac_array[30] ^ mac_array[29] ^ mac_array[26] ^ mac_array[25] ^
mac_array[23] ^ mac_array[18] ^ mac_array[16] ^ mac_array[14] ^
mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[7] ^
mac_array[3] ^ mac_array[2] ^ mac_array[1];
crc[4] = mac_array[46] ^ mac_array[45] ^ mac_array[44] ^ mac_array[41] ^
mac_array[39] ^ mac_array[36] ^ mac_array[35] ^ mac_array[31] ^
mac_array[30] ^ mac_array[27] ^ mac_array[26] ^ mac_array[24] ^
mac_array[19] ^ mac_array[17] ^ mac_array[15] ^ mac_array[14] ^
mac_array[12] ^ mac_array[10] ^ mac_array[8] ^ mac_array[4] ^
mac_array[3] ^ mac_array[2];
crc[5] = mac_array[47] ^ mac_array[46] ^ mac_array[45] ^ mac_array[42] ^
mac_array[40] ^ mac_array[37] ^ mac_array[36] ^ mac_array[32] ^
mac_array[31] ^ mac_array[28] ^ mac_array[27] ^ mac_array[25] ^
mac_array[20] ^ mac_array[18] ^ mac_array[16] ^ mac_array[15] ^
mac_array[13] ^ mac_array[11] ^ mac_array[9] ^ mac_array[5] ^
mac_array[4] ^ mac_array[3];
crc[6] = mac_array[47] ^ mac_array[46] ^ mac_array[43] ^ mac_array[41] ^
mac_array[38] ^ mac_array[37] ^ mac_array[33] ^ mac_array[32] ^
mac_array[29] ^ mac_array[28] ^ mac_array[26] ^ mac_array[21] ^
mac_array[19] ^ mac_array[17] ^ mac_array[16] ^ mac_array[14] ^
mac_array[12] ^ mac_array[10] ^ mac_array[6] ^ mac_array[5] ^
mac_array[4];
crc[7] = mac_array[47] ^ mac_array[44] ^ mac_array[42] ^ mac_array[39] ^
mac_array[38] ^ mac_array[34] ^ mac_array[33] ^ mac_array[30] ^
mac_array[29] ^ mac_array[27] ^ mac_array[22] ^ mac_array[20] ^
mac_array[18] ^ mac_array[17] ^ mac_array[15] ^ mac_array[13] ^
mac_array[11] ^ mac_array[7] ^ mac_array[6] ^ mac_array[5];
for (i = 0; i < 8; i++)
crc_result = crc_result | (crc[i] << i);
table = MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num);
eth_port_set_filter_table_entry(table, crc_result);
}
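The unrolled XOR network above computes a CRC-8 over the 48 address bits; a loop formulation would look roughly like the sketch below. The polynomial (x^8 + x^2 + x + 1, i.e. 0x07) and the LSB-first feed order are assumptions inferred from the mac_array[] ordering, and equivalence with the unrolled form is only tentative, not verified here:

static unsigned char crc8_over_bits(const int *bits, int nbits)
{
	unsigned char crc = 0;
	int i;

	for (i = 0; i < nbits; i++) {
		/* feedback bit: current MSB of the CRC xor next input bit */
		int fb = ((crc >> 7) & 1) ^ (bits[i] & 1);

		crc <<= 1;
		if (fb)
			crc ^= 0x07;	/* assumed polynomial */
	}
	return crc;
}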
/*
* Set the entire multicast list based on dev->mc_list.
*/
static void eth_port_set_multicast_list(struct net_device *dev)
{
struct dev_mc_list *mc_list;
int i;
int table_index;
struct mv643xx_private *mp = netdev_priv(dev);
unsigned int eth_port_num = mp->port_num;
/* If the device is in promiscuous mode or in all multicast mode,
* we will fully populate both multicast tables with accept.
* This is guaranteed to yield a match on all multicast addresses...
*/
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI)) {
for (table_index = 0; table_index <= 0xFC; table_index += 4) {
/* Set all entries in DA filter special multicast
* table (Ex_dFSMT)
* Set for ETH_Q0 for now
* Bits
* 0 Accept=1, Drop=0
* 3-1 Queue ETH_Q0=0
* 7-4 Reserved = 0;
*/
mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
/* Set all entries in DA filter other multicast
* table (Ex_dFOMT)
* Set for ETH_Q0 for now
* Bits
* 0 Accept=1, Drop=0
* 3-1 Queue ETH_Q0=0
* 7-4 Reserved = 0;
*/
mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE(eth_port_num) + table_index, 0x01010101);
}
return;
}
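Why 0x01010101 works: each 32-bit table register packs four 8-bit entries, so that constant sets the "accept, queue 0" bit in all four at once. A quick consistency check on the loop bounds (illustrative arithmetic):

/* table_index 0x00..0xFC step 4 = 64 registers,
 * 64 registers * 4 entries each = 256 hash entries,
 * exactly the size of each DA filter multicast table. */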
/* We will clear out multicast tables every time we get the list.
* Then add the entire new list...
*/
for (table_index = 0; table_index <= 0xFC; table_index += 4) {
/* Clear DA filter special multicast table (Ex_dFSMT) */
mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
(eth_port_num) + table_index, 0);
/* Clear DA filter other multicast table (Ex_dFOMT) */
mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
(eth_port_num) + table_index, 0);
}
/* Get pointer to net_device multicast list and add each one... */
for (i = 0, mc_list = dev->mc_list;
(i < 256) && (mc_list != NULL) && (i < dev->mc_count);
i++, mc_list = mc_list->next)
if (mc_list->dmi_addrlen == 6)
eth_port_mc_addr(eth_port_num, mc_list->dmi_addr);
}
/*
* eth_port_init_mac_tables - Clear all entries in the UC, SMC and OMC tables
*
...@@ -2080,11 +2208,11 @@ static void eth_port_init_mac_tables(unsigned int eth_port_num)
for (table_index = 0; table_index <= 0xFC; table_index += 4) {
/* Clear DA filter special multicast table (Ex_dFSMT) */
mv_write((MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE mv_write(MV643XX_ETH_DA_FILTER_SPECIAL_MULTICAST_TABLE_BASE
(eth_port_num) + table_index), 0); (eth_port_num) + table_index, 0);
/* Clear DA filter other multicast table (Ex_dFOMT) */
mv_write((MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE mv_write(MV643XX_ETH_DA_FILTER_OTHER_MULTICAST_TABLE_BASE
(eth_port_num) + table_index), 0); (eth_port_num) + table_index, 0);
}
}
...@@ -2489,6 +2617,7 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
struct eth_tx_desc *current_descriptor;
struct eth_tx_desc *first_descriptor;
u32 command;
unsigned long flags;
/* Do not process Tx ring in case of Tx ring resource error */
if (mp->tx_resource_err)
...@@ -2505,6 +2634,8 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
return ETH_ERROR;
}
spin_lock_irqsave(&mp->lock, flags);
mp->tx_ring_skbs++;
BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
...@@ -2554,11 +2685,15 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
mp->tx_resource_err = 1;
mp->tx_curr_desc_q = tx_first_desc;
spin_unlock_irqrestore(&mp->lock, flags);
return ETH_QUEUE_LAST_RESOURCE;
}
mp->tx_curr_desc_q = tx_next_desc;
spin_unlock_irqrestore(&mp->lock, flags);
return ETH_OK;
}
#else
...@@ -2569,11 +2704,14 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
int tx_desc_used;
struct eth_tx_desc *current_descriptor;
unsigned int command_status;
unsigned long flags;
/* Do not process Tx ring in case of Tx ring resource error */
if (mp->tx_resource_err)
return ETH_QUEUE_FULL;
spin_lock_irqsave(&mp->lock, flags);
mp->tx_ring_skbs++;
BUG_ON(mp->tx_ring_skbs > mp->tx_ring_size);
...@@ -2604,9 +2742,12 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
/* Check for ring index overlap in the Tx desc ring */
if (tx_desc_curr == tx_desc_used) {
mp->tx_resource_err = 1;
spin_unlock_irqrestore(&mp->lock, flags);
return ETH_QUEUE_LAST_RESOURCE;
}
spin_unlock_irqrestore(&mp->lock, flags);
return ETH_OK;
}
#endif
...@@ -2629,23 +2770,27 @@ static ETH_FUNC_RET_STATUS eth_port_send(struct mv643xx_private *mp,
* Tx ring 'first' and 'used' indexes are updated.
*
* RETURN:
* ETH_ERROR in case the routine can not access Tx desc ring. * ETH_OK on success
* ETH_RETRY in case there is transmission in process. * ETH_ERROR otherwise.
* ETH_END_OF_JOB if the routine has nothing to release.
* ETH_OK otherwise.
*
*/
static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
struct pkt_info *p_pkt_info)
{
int tx_desc_used;
int tx_busy_desc;
struct eth_tx_desc *p_tx_desc_used;
unsigned int command_status;
unsigned long flags;
int err = ETH_OK;
spin_lock_irqsave(&mp->lock, flags);
#ifdef MV643XX_CHECKSUM_OFFLOAD_TX
int tx_busy_desc = mp->tx_first_desc_q; tx_busy_desc = mp->tx_first_desc_q;
#else
int tx_busy_desc = mp->tx_curr_desc_q; tx_busy_desc = mp->tx_curr_desc_q;
#endif
struct eth_tx_desc *p_tx_desc_used;
unsigned int command_status;
/* Get the Tx Desc ring indexes */
tx_desc_used = mp->tx_used_desc_q;
...@@ -2653,22 +2798,30 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
p_tx_desc_used = &mp->p_tx_desc_area[tx_desc_used];
/* Sanity check */
if (p_tx_desc_used == NULL) if (p_tx_desc_used == NULL) {
return ETH_ERROR; err = ETH_ERROR;
goto out;
}
/* Stop release. About to overlap the current available Tx descriptor */
if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) if (tx_desc_used == tx_busy_desc && !mp->tx_resource_err) {
return ETH_END_OF_JOB; err = ETH_ERROR;
goto out;
}
command_status = p_tx_desc_used->cmd_sts;
/* Still transmitting... */
if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
return ETH_RETRY; err = ETH_ERROR;
goto out;
}
/* Pass the packet information to the caller */
p_pkt_info->cmd_sts = command_status;
p_pkt_info->return_info = mp->tx_skb[tx_desc_used];
p_pkt_info->buf_ptr = p_tx_desc_used->buf_ptr;
p_pkt_info->byte_cnt = p_tx_desc_used->byte_cnt;
mp->tx_skb[tx_desc_used] = NULL;
/* Update the next descriptor to release. */
...@@ -2680,7 +2833,10 @@ static ETH_FUNC_RET_STATUS eth_tx_return_desc(struct mv643xx_private *mp,
BUG_ON(mp->tx_ring_skbs == 0);
mp->tx_ring_skbs--;
return ETH_OK; out:
spin_unlock_irqrestore(&mp->lock, flags);
return err;
}
/*
...@@ -2712,11 +2868,14 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
int rx_next_curr_desc, rx_curr_desc, rx_used_desc;
volatile struct eth_rx_desc *p_rx_desc;
unsigned int command_status;
unsigned long flags;
/* Do not process Rx ring in case of Rx ring resource error */
if (mp->rx_resource_err)
return ETH_QUEUE_FULL;
spin_lock_irqsave(&mp->lock, flags);
/* Get the Rx Desc ring 'curr and 'used' indexes */
rx_curr_desc = mp->rx_curr_desc_q;
rx_used_desc = mp->rx_used_desc_q;
...@@ -2728,8 +2887,10 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
rmb();
/* Nothing to receive... */
if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) if (command_status & (ETH_BUFFER_OWNED_BY_DMA)) {
spin_unlock_irqrestore(&mp->lock, flags);
return ETH_END_OF_JOB;
}
p_pkt_info->byte_cnt = (p_rx_desc->byte_cnt) - RX_BUF_OFFSET;
p_pkt_info->cmd_sts = command_status;
...@@ -2749,6 +2910,8 @@ static ETH_FUNC_RET_STATUS eth_port_receive(struct mv643xx_private *mp,
if (rx_next_curr_desc == rx_used_desc)
mp->rx_resource_err = 1;
spin_unlock_irqrestore(&mp->lock, flags);
return ETH_OK;
}
...@@ -2777,6 +2940,9 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
{
int used_rx_desc; /* Where to return Rx resource */
volatile struct eth_rx_desc *p_used_rx_desc;
unsigned long flags;
spin_lock_irqsave(&mp->lock, flags);
/* Get 'used' Rx descriptor */
used_rx_desc = mp->rx_used_desc_q;
...@@ -2800,6 +2966,8 @@ static ETH_FUNC_RET_STATUS eth_rx_return_buff(struct mv643xx_private *mp,
/* Any Rx return cancels the Rx resource error status */
mp->rx_resource_err = 0;
spin_unlock_irqrestore(&mp->lock, flags);
return ETH_OK;
}
...
...@@ -3243,12 +3243,22 @@ static int __devinit skge_probe(struct pci_dev *pdev,
pci_set_master(pdev);
if (!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) if (sizeof(dma_addr_t) > sizeof(u32) &&
!(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
using_dac = 1;
else if (!(err = pci_set_dma_mask(pdev, DMA_32BIT_MASK))) { err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
printk(KERN_ERR PFX "%s no usable DMA configuration\n", if (err < 0) {
pci_name(pdev)); printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
goto err_out_free_regions; "for consistent allocations\n", pci_name(pdev));
goto err_out_free_regions;
}
} else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err) {
printk(KERN_ERR PFX "%s no usable DMA configuration\n",
pci_name(pdev));
goto err_out_free_regions;
}
}
#ifdef __BIG_ENDIAN
...
...@@ -57,7 +57,7 @@
#include "sky2.h"
#define DRV_NAME "sky2"
#define DRV_VERSION "0.11" #define DRV_VERSION "0.13"
#define PFX DRV_NAME " "
/*
...@@ -75,6 +75,7 @@
#define RX_LE_BYTES (RX_LE_SIZE*sizeof(struct sky2_rx_le))
#define RX_MAX_PENDING (RX_LE_SIZE/2 - 2)
#define RX_DEF_PENDING RX_MAX_PENDING
#define RX_SKB_ALIGN 8
#define TX_RING_SIZE 512
#define TX_DEF_PENDING (TX_RING_SIZE - 1)
...@@ -91,7 +92,7 @@
static const u32 default_msg =
NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK
| NETIF_MSG_TIMER | NETIF_MSG_TX_ERR | NETIF_MSG_RX_ERR
| NETIF_MSG_IFUP | NETIF_MSG_IFDOWN | NETIF_MSG_INTR; | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN;
static int debug = -1; /* defaults above */
module_param(debug, int, 0);
...@@ -624,13 +625,16 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
}
static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len) /* Assign Ram Buffer allocation.
* start and end are in units of 4k bytes
* ram registers are in units of 64bit words
*/
static void sky2_ramset(struct sky2_hw *hw, u16 q, u8 startk, u8 endk)
{ {
u32 end; u32 start, end;
start /= 8; start = startk * 4096/8;
len /= 8; end = (endk * 4096/8) - 1;
end = start + len - 1;
sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR); sky2_write8(hw, RB_ADDR(q, RB_CTRL), RB_RST_CLR);
sky2_write32(hw, RB_ADDR(q, RB_START), start); sky2_write32(hw, RB_ADDR(q, RB_START), start);
...@@ -639,14 +643,19 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len) ...@@ -639,14 +643,19 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, size_t len)
sky2_write32(hw, RB_ADDR(q, RB_RP), start); sky2_write32(hw, RB_ADDR(q, RB_RP), start);
if (q == Q_R1 || q == Q_R2) { if (q == Q_R1 || q == Q_R2) {
u32 rxup, rxlo; u32 space = (endk - startk) * 4096/8;
u32 tp = space - space/4;
rxlo = len/2; /* On receive queue's set the thresholds
rxup = rxlo + len/4; * give receiver priority when > 3/4 full
* send pause when down to 2K
*/
sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
/* Set thresholds on receive queue's */ tp = space - 2048/8;
sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), rxup); sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), rxlo); sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
} else { } else {
/* Enable store & forward on Tx queue's because /* Enable store & forward on Tx queue's because
* Tx FIFO is only 1K on Yukon * Tx FIFO is only 1K on Yukon
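For the reworked sky2_ramset() above, a quick worked example of the unit conversions and thresholds may help: callers now pass the RAM window in 4 KB blocks, the RB_* registers are programmed in 64-bit words, and receive queues get a high-priority threshold above 3/4 full plus a pause threshold when roughly 2 KB remain. The numbers below are purely illustrative:

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t startk = 0, endk = 12;		/* e.g. a 48 KB RAM window */

	uint32_t start = startk * 4096 / 8;	/* 4 KB blocks -> 64-bit word address */
	uint32_t end   = endk * 4096 / 8 - 1;	/* inclusive end word */
	uint32_t space = (endk - startk) * 4096 / 8;

	uint32_t uthp = space - space / 4;	/* receiver gets priority above 3/4 full */
	uint32_t lthp = space / 2;
	uint32_t utpp = space - 2048 / 8;	/* send pause when ~2 KB are left */
	uint32_t ltpp = space / 4;

	printf("start=%u end=%u uthp=%u lthp=%u utpp=%u ltpp=%u\n",
	       start, end, uthp, lthp, utpp, ltpp);
	return 0;
}
```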
...@@ -695,9 +704,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2) ...@@ -695,9 +704,10 @@ static inline struct sky2_tx_le *get_tx_le(struct sky2_port *sky2)
* This is a workaround code taken from SysKonnect sk98lin driver * This is a workaround code taken from SysKonnect sk98lin driver
* to deal with chip bug on Yukon EC rev 0 in the wraparound case. * to deal with chip bug on Yukon EC rev 0 in the wraparound case.
*/ */
static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, static void sky2_put_idx(struct sky2_hw *hw, unsigned q,
u16 idx, u16 *last, u16 size) u16 idx, u16 *last, u16 size)
{ {
wmb();
if (is_ec_a1(hw) && idx < *last) { if (is_ec_a1(hw) && idx < *last) {
u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX)); u16 hwget = sky2_read16(hw, Y2_QADDR(q, PREF_UNIT_GET_IDX));
...@@ -721,6 +731,7 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q, ...@@ -721,6 +731,7 @@ static inline void sky2_put_idx(struct sky2_hw *hw, unsigned q,
sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx); sky2_write16(hw, Y2_QADDR(q, PREF_UNIT_PUT_IDX), idx);
} }
*last = idx; *last = idx;
mmiowb();
} }
...@@ -734,11 +745,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2) ...@@ -734,11 +745,11 @@ static inline struct sky2_rx_le *sky2_next_rx(struct sky2_port *sky2)
/* Return high part of DMA address (could be 32 or 64 bit) */ /* Return high part of DMA address (could be 32 or 64 bit) */
static inline u32 high32(dma_addr_t a) static inline u32 high32(dma_addr_t a)
{ {
return (a >> 16) >> 16; return sizeof(a) > sizeof(u32) ? (a >> 16) >> 16 : 0;
} }
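The new high32() guards the double 16-bit shift with a sizeof() check so the expression stays well defined even when dma_addr_t is only 32 bits wide (a plain shift by 32 would be undefined behaviour). A minimal stand-alone version, assuming a 64-bit dma_addr_t for the demonstration:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;	/* assume 64-bit DMA addresses here */

/* split the shift so it never becomes a single shift by 32 */
static inline uint32_t high32(dma_addr_t a)
{
	return sizeof(a) > sizeof(uint32_t) ? (a >> 16) >> 16 : 0;
}

int main(void)
{
	dma_addr_t map = 0x0000000123456789ULL;

	printf("high=0x%x low=0x%x\n", high32(map), (uint32_t)map);
	return 0;
}
```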
/* Build description to hardware about buffer */ /* Build description to hardware about buffer */
static inline void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map) static void sky2_rx_add(struct sky2_port *sky2, dma_addr_t map)
{ {
struct sky2_rx_le *le; struct sky2_rx_le *le;
u32 hi = high32(map); u32 hi = high32(map);
...@@ -878,13 +889,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp ...@@ -878,13 +889,13 @@ static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp
struct sky2_hw *hw = sky2->hw; struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port; u16 port = sky2->port;
spin_lock(&sky2->tx_lock); spin_lock_bh(&sky2->tx_lock);
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON); sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_ON);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON); sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_ON);
sky2->vlgrp = grp; sky2->vlgrp = grp;
spin_unlock(&sky2->tx_lock); spin_unlock_bh(&sky2->tx_lock);
} }
static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
...@@ -893,27 +904,42 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) ...@@ -893,27 +904,42 @@ static void sky2_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
struct sky2_hw *hw = sky2->hw; struct sky2_hw *hw = sky2->hw;
u16 port = sky2->port; u16 port = sky2->port;
spin_lock(&sky2->tx_lock); spin_lock_bh(&sky2->tx_lock);
sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF); sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), RX_VLAN_STRIP_OFF);
sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF); sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T), TX_VLAN_TAG_OFF);
if (sky2->vlgrp) if (sky2->vlgrp)
sky2->vlgrp->vlan_devices[vid] = NULL; sky2->vlgrp->vlan_devices[vid] = NULL;
spin_unlock(&sky2->tx_lock); spin_unlock_bh(&sky2->tx_lock);
} }
#endif #endif
/*
* It appears the hardware has a bug in the FIFO logic that
* causes it to hang if the FIFO gets overrun and the receive buffer
* is not aligned. Also alloc_skb() won't align properly if slab
* debugging is enabled.
*/
static inline struct sk_buff *sky2_alloc_skb(unsigned int size, gfp_t gfp_mask)
{
struct sk_buff *skb;
skb = alloc_skb(size + RX_SKB_ALIGN, gfp_mask);
if (likely(skb)) {
unsigned long p = (unsigned long) skb->data;
skb_reserve(skb,
((p + RX_SKB_ALIGN - 1) & ~(RX_SKB_ALIGN - 1)) - p);
}
return skb;
}
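The padding computed by the new sky2_alloc_skb() is the classic round-up-to-boundary expression; a tiny stand-alone example with an assumed starting address shows what gets reserved:

```c
#include <stdio.h>

#define RX_SKB_ALIGN 8

int main(void)
{
	unsigned long p = 0x1003;	/* pretend skb->data landed here */
	unsigned long pad =
		((p + RX_SKB_ALIGN - 1) & ~(unsigned long)(RX_SKB_ALIGN - 1)) - p;

	printf("reserve %lu bytes, data moves to 0x%lx\n", pad, p + pad);
	return 0;
}
```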
/* /*
* Allocate and setup receiver buffer pool. * Allocate and setup receiver buffer pool.
* In case of 64 bit dma, there are 2X as many list elements * In case of 64 bit dma, there are 2X as many list elements
* available as ring entries * available as ring entries
* and need to reserve one list element so we don't wrap around. * and need to reserve one list element so we don't wrap around.
*
* It appears the hardware has a bug in the FIFO logic that
* cause it to hang if the FIFO gets overrun and the receive buffer
* is not aligned. This means we can't use skb_reserve to align
* the IP header.
*/ */
static int sky2_rx_start(struct sky2_port *sky2) static int sky2_rx_start(struct sky2_port *sky2)
{ {
...@@ -929,7 +955,7 @@ static int sky2_rx_start(struct sky2_port *sky2) ...@@ -929,7 +955,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
for (i = 0; i < sky2->rx_pending; i++) { for (i = 0; i < sky2->rx_pending; i++) {
struct ring_info *re = sky2->rx_ring + i; struct ring_info *re = sky2->rx_ring + i;
re->skb = dev_alloc_skb(sky2->rx_bufsize); re->skb = sky2_alloc_skb(sky2->rx_bufsize, GFP_KERNEL);
if (!re->skb) if (!re->skb)
goto nomem; goto nomem;
...@@ -986,19 +1012,19 @@ static int sky2_up(struct net_device *dev) ...@@ -986,19 +1012,19 @@ static int sky2_up(struct net_device *dev)
sky2_mac_init(hw, port); sky2_mac_init(hw, port);
/* Configure RAM buffers */ /* Determine available ram buffer space (in 4K blocks).
if (hw->chip_id == CHIP_ID_YUKON_FE || * Note: not sure about the FE setting below yet
(hw->chip_id == CHIP_ID_YUKON_EC && hw->chip_rev == 2)) */
ramsize = 4096; if (hw->chip_id == CHIP_ID_YUKON_FE)
else { ramsize = 4;
u8 e0 = sky2_read8(hw, B2_E_0); else
ramsize = (e0 == 0) ? (128 * 1024) : (e0 * 4096); ramsize = sky2_read8(hw, B2_E_0);
}
/* Give transmitter one third (rounded up) */
rxspace = ramsize - (ramsize + 2) / 3;
/* 2/3 for Rx */
rxspace = (2 * ramsize) / 3;
sky2_ramset(hw, rxqaddr[port], 0, rxspace); sky2_ramset(hw, rxqaddr[port], 0, rxspace);
sky2_ramset(hw, txqaddr[port], rxspace, ramsize - rxspace); sky2_ramset(hw, txqaddr[port], rxspace, ramsize);
/* Make sure SyncQ is disabled */ /* Make sure SyncQ is disabled */
sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL), sky2_write8(hw, RB_ADDR(port == 0 ? Q_XS1 : Q_XS2, RB_CTRL),
...@@ -1054,7 +1080,7 @@ static inline int tx_avail(const struct sky2_port *sky2) ...@@ -1054,7 +1080,7 @@ static inline int tx_avail(const struct sky2_port *sky2)
} }
/* Estimate of number of transmit list elements required */ /* Estimate of number of transmit list elements required */
static inline unsigned tx_le_req(const struct sk_buff *skb) static unsigned tx_le_req(const struct sk_buff *skb)
{ {
unsigned count; unsigned count;
...@@ -1090,6 +1116,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) ...@@ -1090,6 +1116,10 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
u16 mss; u16 mss;
u8 ctrl; u8 ctrl;
/* No BH disabling for tx_lock here. We are running in BH disabled
* context and TX reclaim runs via poll inside of a software
* interrupt, and no related locks in IRQ processing.
*/
if (!spin_trylock(&sky2->tx_lock)) if (!spin_trylock(&sky2->tx_lock))
return NETDEV_TX_LOCKED; return NETDEV_TX_LOCKED;
...@@ -1099,8 +1129,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) ...@@ -1099,8 +1129,9 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
*/ */
if (!netif_queue_stopped(dev)) { if (!netif_queue_stopped(dev)) {
netif_stop_queue(dev); netif_stop_queue(dev);
printk(KERN_WARNING PFX "%s: ring full when queue awake!\n", if (net_ratelimit())
dev->name); printk(KERN_WARNING PFX "%s: ring full when queue awake!\n",
dev->name);
} }
spin_unlock(&sky2->tx_lock); spin_unlock(&sky2->tx_lock);
...@@ -1199,7 +1230,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) ...@@ -1199,7 +1230,7 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset, mapping = pci_map_page(hw->pdev, frag->page, frag->page_offset,
frag->size, PCI_DMA_TODEVICE); frag->size, PCI_DMA_TODEVICE);
addr64 = (mapping >> 16) >> 16; addr64 = high32(mapping);
if (addr64 != sky2->tx_addr64) { if (addr64 != sky2->tx_addr64) {
le = get_tx_le(sky2); le = get_tx_le(sky2);
le->tx.addr = cpu_to_le32(addr64); le->tx.addr = cpu_to_le32(addr64);
...@@ -1229,7 +1260,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev) ...@@ -1229,7 +1260,6 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
netif_stop_queue(dev); netif_stop_queue(dev);
out_unlock: out_unlock:
mmiowb();
spin_unlock(&sky2->tx_lock); spin_unlock(&sky2->tx_lock);
dev->trans_start = jiffies; dev->trans_start = jiffies;
...@@ -1282,17 +1312,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done) ...@@ -1282,17 +1312,17 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
} }
spin_lock(&sky2->tx_lock);
sky2->tx_cons = put; sky2->tx_cons = put;
if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE) if (netif_queue_stopped(dev) && tx_avail(sky2) > MAX_SKB_TX_LE)
netif_wake_queue(dev); netif_wake_queue(dev);
spin_unlock(&sky2->tx_lock);
} }
/* Cleanup all untransmitted buffers, assume transmitter not running */ /* Cleanup all untransmitted buffers, assume transmitter not running */
static void sky2_tx_clean(struct sky2_port *sky2) static void sky2_tx_clean(struct sky2_port *sky2)
{ {
spin_lock_bh(&sky2->tx_lock);
sky2_tx_complete(sky2, sky2->tx_prod); sky2_tx_complete(sky2, sky2->tx_prod);
spin_unlock_bh(&sky2->tx_lock);
} }
/* Network shutdown */ /* Network shutdown */
...@@ -1582,28 +1612,40 @@ static void sky2_phy_task(void *arg) ...@@ -1582,28 +1612,40 @@ static void sky2_phy_task(void *arg)
local_irq_enable(); local_irq_enable();
} }
/* Transmit timeout is only called if we are running, carrier is up
* and tx queue is full (stopped).
*/
static void sky2_tx_timeout(struct net_device *dev) static void sky2_tx_timeout(struct net_device *dev)
{ {
struct sky2_port *sky2 = netdev_priv(dev); struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw; struct sky2_hw *hw = sky2->hw;
unsigned txq = txqaddr[sky2->port]; unsigned txq = txqaddr[sky2->port];
u16 ridx;
/* Maybe we just missed a status interrupt */
spin_lock(&sky2->tx_lock);
ridx = sky2_read16(hw,
sky2->port == 0 ? STAT_TXA1_RIDX : STAT_TXA2_RIDX);
sky2_tx_complete(sky2, ridx);
spin_unlock(&sky2->tx_lock);
if (!netif_queue_stopped(dev)) {
if (net_ratelimit())
pr_info(PFX "transmit interrupt missed? recovered\n");
return;
}
if (netif_msg_timer(sky2)) if (netif_msg_timer(sky2))
printk(KERN_ERR PFX "%s: tx timeout\n", dev->name); printk(KERN_ERR PFX "%s: tx timeout\n", dev->name);
netif_stop_queue(dev);
sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP); sky2_write32(hw, Q_ADDR(txq, Q_CSR), BMU_STOP);
sky2_read32(hw, Q_ADDR(txq, Q_CSR));
sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET); sky2_write32(hw, Y2_QADDR(txq, PREF_UNIT_CTRL), PREF_UNIT_RST_SET);
sky2_tx_clean(sky2); sky2_tx_clean(sky2);
sky2_qset(hw, txq); sky2_qset(hw, txq);
sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1); sky2_prefetch_init(hw, txq, sky2->tx_le_map, TX_RING_SIZE - 1);
netif_wake_queue(dev);
} }
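The reworked sky2_tx_timeout() above first assumes the timeout was a missed status interrupt: it reads the chip's transmit report index, completes everything up to it, and only stops and re-initializes the queue if the ring is still full. A rough stand-alone sketch of that decision, where RING_SIZE and read_hw_report_idx() are stand-ins for the driver's ring bookkeeping and the STAT_TXA*_RIDX register read:

```c
#include <stdio.h>

#define RING_SIZE 8

/* stand-in for reading STAT_TXA1_RIDX/STAT_TXA2_RIDX from the chip */
static unsigned read_hw_report_idx(void)
{
	return 5;
}

int main(void)
{
	unsigned prod = 1, cons = 2;	/* (prod - cons) % RING_SIZE == 7: ring full */
	unsigned ridx = read_hw_report_idx();	/* chip actually finished up to entry 5 */

	cons = ridx;			/* reclaim completed entries */
	if ((prod - cons) % RING_SIZE < RING_SIZE - 1) {
		printf("transmit interrupt missed? recovered\n");
		return 0;
	}

	printf("queue still full: stop the BMU, clean and re-init the ring\n");
	return 0;
}
```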
...@@ -1713,7 +1755,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2, ...@@ -1713,7 +1755,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
} else { } else {
struct sk_buff *nskb; struct sk_buff *nskb;
nskb = dev_alloc_skb(sky2->rx_bufsize); nskb = sky2_alloc_skb(sky2->rx_bufsize, GFP_ATOMIC);
if (!nskb) if (!nskb)
goto resubmit; goto resubmit;
...@@ -1745,7 +1787,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2, ...@@ -1745,7 +1787,7 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
error: error:
++sky2->net_stats.rx_errors; ++sky2->net_stats.rx_errors;
if (netif_msg_rx_err(sky2)) if (netif_msg_rx_err(sky2) && net_ratelimit())
printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n", printk(KERN_INFO PFX "%s: rx error, status 0x%x length %d\n",
sky2->netdev->name, status, length); sky2->netdev->name, status, length);
...@@ -1766,13 +1808,16 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2, ...@@ -1766,13 +1808,16 @@ static struct sk_buff *sky2_receive(struct sky2_port *sky2,
*/ */
#define TX_NO_STATUS 0xffff #define TX_NO_STATUS 0xffff
static inline void sky2_tx_check(struct sky2_hw *hw, int port, u16 last) static void sky2_tx_check(struct sky2_hw *hw, int port, u16 last)
{ {
if (last != TX_NO_STATUS) { if (last != TX_NO_STATUS) {
struct net_device *dev = hw->dev[port]; struct net_device *dev = hw->dev[port];
if (dev && netif_running(dev)) { if (dev && netif_running(dev)) {
struct sky2_port *sky2 = netdev_priv(dev); struct sky2_port *sky2 = netdev_priv(dev);
spin_lock(&sky2->tx_lock);
sky2_tx_complete(sky2, last); sky2_tx_complete(sky2, last);
spin_unlock(&sky2->tx_lock);
} }
} }
} }
...@@ -1800,7 +1845,6 @@ static int sky2_poll(struct net_device *dev0, int *budget) ...@@ -1800,7 +1845,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
struct sk_buff *skb; struct sk_buff *skb;
u32 status; u32 status;
u16 length; u16 length;
u8 op;
le = hw->st_le + hw->st_idx; le = hw->st_le + hw->st_idx;
hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE; hw->st_idx = (hw->st_idx + 1) % STATUS_RING_SIZE;
...@@ -1814,10 +1858,8 @@ static int sky2_poll(struct net_device *dev0, int *budget) ...@@ -1814,10 +1858,8 @@ static int sky2_poll(struct net_device *dev0, int *budget)
sky2 = netdev_priv(dev); sky2 = netdev_priv(dev);
status = le32_to_cpu(le->status); status = le32_to_cpu(le->status);
length = le16_to_cpu(le->length); length = le16_to_cpu(le->length);
op = le->opcode & ~HW_OWNER;
le->opcode = 0;
switch (op) { switch (le->opcode & ~HW_OWNER) {
case OP_RXSTAT: case OP_RXSTAT:
skb = sky2_receive(sky2, length, status); skb = sky2_receive(sky2, length, status);
if (!skb) if (!skb)
...@@ -1865,14 +1907,13 @@ static int sky2_poll(struct net_device *dev0, int *budget) ...@@ -1865,14 +1907,13 @@ static int sky2_poll(struct net_device *dev0, int *budget)
default: default:
if (net_ratelimit()) if (net_ratelimit())
printk(KERN_WARNING PFX printk(KERN_WARNING PFX
"unknown status opcode 0x%x\n", op); "unknown status opcode 0x%x\n", le->opcode);
break; break;
} }
} }
exit_loop: exit_loop:
sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ);
mmiowb();
sky2_tx_check(hw, 0, tx_done[0]); sky2_tx_check(hw, 0, tx_done[0]);
sky2_tx_check(hw, 1, tx_done[1]); sky2_tx_check(hw, 1, tx_done[1]);
...@@ -1887,7 +1928,6 @@ static int sky2_poll(struct net_device *dev0, int *budget) ...@@ -1887,7 +1928,6 @@ static int sky2_poll(struct net_device *dev0, int *budget)
netif_rx_complete(dev0); netif_rx_complete(dev0);
hw->intr_mask |= Y2_IS_STAT_BMU; hw->intr_mask |= Y2_IS_STAT_BMU;
sky2_write32(hw, B0_IMSK, hw->intr_mask); sky2_write32(hw, B0_IMSK, hw->intr_mask);
mmiowb();
return 0; return 0;
} else { } else {
*budget -= work_done; *budget -= work_done;
...@@ -1900,35 +1940,42 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status) ...@@ -1900,35 +1940,42 @@ static void sky2_hw_error(struct sky2_hw *hw, unsigned port, u32 status)
{ {
struct net_device *dev = hw->dev[port]; struct net_device *dev = hw->dev[port];
printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n", if (net_ratelimit())
dev->name, status); printk(KERN_INFO PFX "%s: hw error interrupt status 0x%x\n",
dev->name, status);
if (status & Y2_IS_PAR_RD1) { if (status & Y2_IS_PAR_RD1) {
printk(KERN_ERR PFX "%s: ram data read parity error\n", if (net_ratelimit())
dev->name); printk(KERN_ERR PFX "%s: ram data read parity error\n",
dev->name);
/* Clear IRQ */ /* Clear IRQ */
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR); sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_RD_PERR);
} }
if (status & Y2_IS_PAR_WR1) { if (status & Y2_IS_PAR_WR1) {
printk(KERN_ERR PFX "%s: ram data write parity error\n", if (net_ratelimit())
dev->name); printk(KERN_ERR PFX "%s: ram data write parity error\n",
dev->name);
sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR); sky2_write16(hw, RAM_BUFFER(port, B3_RI_CTRL), RI_CLR_WR_PERR);
} }
if (status & Y2_IS_PAR_MAC1) { if (status & Y2_IS_PAR_MAC1) {
printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name); if (net_ratelimit())
printk(KERN_ERR PFX "%s: MAC parity error\n", dev->name);
sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE); sky2_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_CLI_TX_PE);
} }
if (status & Y2_IS_PAR_RX1) { if (status & Y2_IS_PAR_RX1) {
printk(KERN_ERR PFX "%s: RX parity error\n", dev->name); if (net_ratelimit())
printk(KERN_ERR PFX "%s: RX parity error\n", dev->name);
sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR); sky2_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), BMU_CLR_IRQ_PAR);
} }
if (status & Y2_IS_TCP_TXA1) { if (status & Y2_IS_TCP_TXA1) {
printk(KERN_ERR PFX "%s: TCP segmentation error\n", dev->name); if (net_ratelimit())
printk(KERN_ERR PFX "%s: TCP segmentation error\n",
dev->name);
sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP); sky2_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), BMU_CLR_IRQ_TCP);
} }
} }
...@@ -1944,8 +1991,9 @@ static void sky2_hw_intr(struct sky2_hw *hw) ...@@ -1944,8 +1991,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
u16 pci_err; u16 pci_err;
pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err); pci_read_config_word(hw->pdev, PCI_STATUS, &pci_err);
printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n", if (net_ratelimit())
pci_name(hw->pdev), pci_err); printk(KERN_ERR PFX "%s: pci hw error (0x%x)\n",
pci_name(hw->pdev), pci_err);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
pci_write_config_word(hw->pdev, PCI_STATUS, pci_write_config_word(hw->pdev, PCI_STATUS,
...@@ -1959,8 +2007,9 @@ static void sky2_hw_intr(struct sky2_hw *hw) ...@@ -1959,8 +2007,9 @@ static void sky2_hw_intr(struct sky2_hw *hw)
pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err); pci_read_config_dword(hw->pdev, PEX_UNC_ERR_STAT, &pex_err);
printk(KERN_ERR PFX "%s: pci express error (0x%x)\n", if (net_ratelimit())
pci_name(hw->pdev), pex_err); printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
pci_name(hw->pdev), pex_err);
/* clear the interrupt */ /* clear the interrupt */
sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
...@@ -2250,7 +2299,7 @@ static int sky2_reset(struct sky2_hw *hw) ...@@ -2250,7 +2299,7 @@ static int sky2_reset(struct sky2_hw *hw)
return 0; return 0;
} }
static inline u32 sky2_supported_modes(const struct sky2_hw *hw) static u32 sky2_supported_modes(const struct sky2_hw *hw)
{ {
u32 modes; u32 modes;
if (hw->copper) { if (hw->copper) {
...@@ -2995,7 +3044,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, ...@@ -2995,7 +3044,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
return dev; return dev;
} }
static inline void sky2_show_addr(struct net_device *dev) static void __devinit sky2_show_addr(struct net_device *dev)
{ {
const struct sky2_port *sky2 = netdev_priv(dev); const struct sky2_port *sky2 = netdev_priv(dev);
...@@ -3038,13 +3087,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev, ...@@ -3038,13 +3087,17 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_regions; goto err_out_free_regions;
} }
if (sizeof(dma_addr_t) > sizeof(u32)) { if (sizeof(dma_addr_t) > sizeof(u32) &&
err = pci_set_dma_mask(pdev, DMA_64BIT_MASK); !(err = pci_set_dma_mask(pdev, DMA_64BIT_MASK))) {
if (!err) using_dac = 1;
using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
} if (err < 0) {
printk(KERN_ERR PFX "%s unable to obtain 64 bit DMA "
"for consistent allocations\n", pci_name(pdev));
goto err_out_free_regions;
}
if (!using_dac) { } else {
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK); err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (err) { if (err) {
printk(KERN_ERR PFX "%s no usable DMA configuration\n", printk(KERN_ERR PFX "%s no usable DMA configuration\n",
...@@ -3052,6 +3105,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev, ...@@ -3052,6 +3105,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
goto err_out_free_regions; goto err_out_free_regions;
} }
} }
#ifdef __BIG_ENDIAN #ifdef __BIG_ENDIAN
/* byte swap descriptors in hardware */ /* byte swap descriptors in hardware */
{ {
...@@ -3064,14 +3118,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev, ...@@ -3064,14 +3118,13 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
#endif #endif
err = -ENOMEM; err = -ENOMEM;
hw = kmalloc(sizeof(*hw), GFP_KERNEL); hw = kzalloc(sizeof(*hw), GFP_KERNEL);
if (!hw) { if (!hw) {
printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n", printk(KERN_ERR PFX "%s: cannot allocate hardware struct\n",
pci_name(pdev)); pci_name(pdev));
goto err_out_free_regions; goto err_out_free_regions;
} }
memset(hw, 0, sizeof(*hw));
hw->pdev = pdev; hw->pdev = pdev;
hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000); hw->regs = ioremap_nocache(pci_resource_start(pdev, 0), 0x4000);
......
...@@ -22,7 +22,6 @@ ...@@ -22,7 +22,6 @@
*/ */
#include <linux/config.h> #include <linux/config.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/crc32.h> #include <linux/crc32.h>
#include <linux/delay.h> #include <linux/delay.h>
...@@ -30,6 +29,7 @@ ...@@ -30,6 +29,7 @@
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/if_vlan.h> #include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/ioport.h> #include <linux/ioport.h>
#include <linux/ip.h> #include <linux/ip.h>
...@@ -43,6 +43,7 @@ ...@@ -43,6 +43,7 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/tcp.h> #include <linux/tcp.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/wait.h> #include <linux/wait.h>
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <asm/bitops.h> #include <asm/bitops.h>
...@@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value) ...@@ -108,42 +109,6 @@ spider_net_write_reg(struct spider_net_card *card, u32 reg, u32 value)
writel(value, card->regs + reg); writel(value, card->regs + reg);
} }
/**
* spider_net_write_reg_sync - writes to an SMMIO register of a card
* @card: device structure
* @reg: register to write to
* @value: value to write into the specified SMMIO register
*
* Unlike spider_net_write_reg, this will also make sure the
* data arrives on the card by reading the reg again.
*/
static void
spider_net_write_reg_sync(struct spider_net_card *card, u32 reg, u32 value)
{
value = cpu_to_le32(value);
writel(value, card->regs + reg);
(void)readl(card->regs + reg);
}
/**
* spider_net_rx_irq_off - switch off rx irq on this spider card
* @card: device structure
*
* switches off rx irq by masking them out in the GHIINTnMSK register
*/
static void
spider_net_rx_irq_off(struct spider_net_card *card)
{
u32 regvalue;
unsigned long flags;
spin_lock_irqsave(&card->intmask_lock, flags);
regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
regvalue &= ~SPIDER_NET_RXINT;
spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
spin_unlock_irqrestore(&card->intmask_lock, flags);
}
/** spider_net_write_phy - write to phy register /** spider_net_write_phy - write to phy register
* @netdev: adapter to be written to * @netdev: adapter to be written to
* @mii_id: id of MII * @mii_id: id of MII
...@@ -199,60 +164,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg) ...@@ -199,60 +164,33 @@ spider_net_read_phy(struct net_device *netdev, int mii_id, int reg)
} }
/** /**
* spider_net_rx_irq_on - switch on rx irq on this spider card * spider_net_rx_irq_off - switch off rx irq on this spider card
* @card: device structure
*
* switches on rx irq by enabling them in the GHIINTnMSK register
*/
static void
spider_net_rx_irq_on(struct spider_net_card *card)
{
u32 regvalue;
unsigned long flags;
spin_lock_irqsave(&card->intmask_lock, flags);
regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK);
regvalue |= SPIDER_NET_RXINT;
spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
spin_unlock_irqrestore(&card->intmask_lock, flags);
}
/**
* spider_net_tx_irq_off - switch off tx irq on this spider card
* @card: device structure * @card: device structure
* *
* switches off tx irq by masking them out in the GHIINTnMSK register * switches off rx irq by masking them out in the GHIINTnMSK register
*/ */
static void static void
spider_net_tx_irq_off(struct spider_net_card *card) spider_net_rx_irq_off(struct spider_net_card *card)
{ {
u32 regvalue; u32 regvalue;
unsigned long flags;
spin_lock_irqsave(&card->intmask_lock, flags); regvalue = SPIDER_NET_INT0_MASK_VALUE & (~SPIDER_NET_RXINT);
regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
regvalue &= ~SPIDER_NET_TXINT;
spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
spin_unlock_irqrestore(&card->intmask_lock, flags);
} }
/** /**
* spider_net_tx_irq_on - switch on tx irq on this spider card * spider_net_rx_irq_on - switch on rx irq on this spider card
* @card: device structure * @card: device structure
* *
* switches on tx irq by enabling them in the GHIINTnMSK register * switches on rx irq by enabling them in the GHIINTnMSK register
*/ */
static void static void
spider_net_tx_irq_on(struct spider_net_card *card) spider_net_rx_irq_on(struct spider_net_card *card)
{ {
u32 regvalue; u32 regvalue;
unsigned long flags;
spin_lock_irqsave(&card->intmask_lock, flags); regvalue = SPIDER_NET_INT0_MASK_VALUE | SPIDER_NET_RXINT;
regvalue = spider_net_read_reg(card, SPIDER_NET_GHIINT0MSK); spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, regvalue);
regvalue |= SPIDER_NET_TXINT;
spider_net_write_reg_sync(card, SPIDER_NET_GHIINT0MSK, regvalue);
spin_unlock_irqrestore(&card->intmask_lock, flags);
} }
/** /**
...@@ -326,9 +264,8 @@ static enum spider_net_descr_status ...@@ -326,9 +264,8 @@ static enum spider_net_descr_status
spider_net_get_descr_status(struct spider_net_descr *descr) spider_net_get_descr_status(struct spider_net_descr *descr)
{ {
u32 cmd_status; u32 cmd_status;
rmb();
cmd_status = descr->dmac_cmd_status; cmd_status = descr->dmac_cmd_status;
rmb();
cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT; cmd_status >>= SPIDER_NET_DESCR_IND_PROC_SHIFT;
/* no need to mask out any bits, as cmd_status is 32 bits wide only /* no need to mask out any bits, as cmd_status is 32 bits wide only
* (and unsigned) */ * (and unsigned) */
...@@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr, ...@@ -349,7 +286,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
{ {
u32 cmd_status; u32 cmd_status;
/* read the status */ /* read the status */
mb();
cmd_status = descr->dmac_cmd_status; cmd_status = descr->dmac_cmd_status;
/* clean the upper 4 bits */ /* clean the upper 4 bits */
cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO; cmd_status &= SPIDER_NET_DESCR_IND_PROC_MASKO;
...@@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr, ...@@ -357,7 +293,6 @@ spider_net_set_descr_status(struct spider_net_descr *descr,
cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT; cmd_status |= ((u32)status)<<SPIDER_NET_DESCR_IND_PROC_SHIFT;
/* and write it back */ /* and write it back */
descr->dmac_cmd_status = cmd_status; descr->dmac_cmd_status = cmd_status;
wmb();
} }
/** /**
...@@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card, ...@@ -398,8 +333,9 @@ spider_net_init_chain(struct spider_net_card *card,
{ {
int i; int i;
struct spider_net_descr *descr; struct spider_net_descr *descr;
dma_addr_t buf;
spin_lock_init(&card->chain_lock); atomic_set(&card->rx_chain_refill,0);
descr = start_descr; descr = start_descr;
memset(descr, 0, sizeof(*descr) * no); memset(descr, 0, sizeof(*descr) * no);
...@@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card, ...@@ -408,14 +344,14 @@ spider_net_init_chain(struct spider_net_card *card,
for (i=0; i<no; i++, descr++) { for (i=0; i<no; i++, descr++) {
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
descr->bus_addr = buf = pci_map_single(card->pdev, descr,
pci_map_single(card->pdev, descr, SPIDER_NET_DESCR_SIZE,
SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL);
PCI_DMA_BIDIRECTIONAL);
if (descr->bus_addr == DMA_ERROR_CODE) if (buf == DMA_ERROR_CODE)
goto iommu_error; goto iommu_error;
descr->bus_addr = buf;
descr->next = descr + 1; descr->next = descr + 1;
descr->prev = descr - 1; descr->prev = descr - 1;
...@@ -439,7 +375,8 @@ spider_net_init_chain(struct spider_net_card *card, ...@@ -439,7 +375,8 @@ spider_net_init_chain(struct spider_net_card *card,
for (i=0; i < no; i++, descr++) for (i=0; i < no; i++, descr++)
if (descr->bus_addr) if (descr->bus_addr)
pci_unmap_single(card->pdev, descr->bus_addr, pci_unmap_single(card->pdev, descr->bus_addr,
SPIDER_NET_DESCR_SIZE, PCI_DMA_BIDIRECTIONAL); SPIDER_NET_DESCR_SIZE,
PCI_DMA_BIDIRECTIONAL);
return -ENOMEM; return -ENOMEM;
} }
...@@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card) ...@@ -459,7 +396,7 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
if (descr->skb) { if (descr->skb) {
dev_kfree_skb(descr->skb); dev_kfree_skb(descr->skb);
pci_unmap_single(card->pdev, descr->buf_addr, pci_unmap_single(card->pdev, descr->buf_addr,
SPIDER_NET_MAX_MTU, SPIDER_NET_MAX_FRAME,
PCI_DMA_BIDIRECTIONAL); PCI_DMA_BIDIRECTIONAL);
} }
descr = descr->next; descr = descr->next;
...@@ -480,12 +417,13 @@ static int ...@@ -480,12 +417,13 @@ static int
spider_net_prepare_rx_descr(struct spider_net_card *card, spider_net_prepare_rx_descr(struct spider_net_card *card,
struct spider_net_descr *descr) struct spider_net_descr *descr)
{ {
dma_addr_t buf;
int error = 0; int error = 0;
int offset; int offset;
int bufsize; int bufsize;
/* we need to round up the buffer size to a multiple of 128 */ /* we need to round up the buffer size to a multiple of 128 */
bufsize = (SPIDER_NET_MAX_MTU + SPIDER_NET_RXBUF_ALIGN - 1) & bufsize = (SPIDER_NET_MAX_FRAME + SPIDER_NET_RXBUF_ALIGN - 1) &
(~(SPIDER_NET_RXBUF_ALIGN - 1)); (~(SPIDER_NET_RXBUF_ALIGN - 1));
/* and we need to have it 128 byte aligned, therefore we allocate a /* and we need to have it 128 byte aligned, therefore we allocate a
...@@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, ...@@ -493,10 +431,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
/* allocate an skb */ /* allocate an skb */
descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1); descr->skb = dev_alloc_skb(bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
if (!descr->skb) { if (!descr->skb) {
if (net_ratelimit()) if (netif_msg_rx_err(card) && net_ratelimit())
if (netif_msg_rx_err(card)) pr_err("Not enough memory to allocate rx buffer\n");
pr_err("Not enough memory to allocate "
"rx buffer\n");
return -ENOMEM; return -ENOMEM;
} }
descr->buf_size = bufsize; descr->buf_size = bufsize;
...@@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, ...@@ -510,12 +446,12 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
if (offset) if (offset)
skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset);
/* io-mmu-map the skb */ /* io-mmu-map the skb */
descr->buf_addr = pci_map_single(card->pdev, descr->skb->data, buf = pci_map_single(card->pdev, descr->skb->data,
SPIDER_NET_MAX_MTU, SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
PCI_DMA_BIDIRECTIONAL); descr->buf_addr = buf;
if (descr->buf_addr == DMA_ERROR_CODE) { if (buf == DMA_ERROR_CODE) {
dev_kfree_skb_any(descr->skb); dev_kfree_skb_any(descr->skb);
if (netif_msg_rx_err(card)) if (netif_msg_rx_err(card) && net_ratelimit())
pr_err("Could not iommu-map rx buffer\n"); pr_err("Could not iommu-map rx buffer\n");
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
} else { } else {
...@@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, ...@@ -526,10 +462,10 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
} }
/** /**
* spider_net_enable_rxctails - sets RX dmac chain tail addresses * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
* @card: card structure * @card: card structure
* *
* spider_net_enable_rxctails sets the RX DMAC chain tail adresses in the * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
* chip by writing to the appropriate register. DMA is enabled in * chip by writing to the appropriate register. DMA is enabled in
* spider_net_enable_rxdmac. * spider_net_enable_rxdmac.
*/ */
...@@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card) ...@@ -551,6 +487,7 @@ spider_net_enable_rxchtails(struct spider_net_card *card)
static void static void
spider_net_enable_rxdmac(struct spider_net_card *card) spider_net_enable_rxdmac(struct spider_net_card *card)
{ {
wmb();
spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR, spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
SPIDER_NET_DMA_RX_VALUE); SPIDER_NET_DMA_RX_VALUE);
} }
...@@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card) ...@@ -559,32 +496,28 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
* spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
* @card: card structure * @card: card structure
* *
* refills descriptors in all chains (last used chain first): allocates skbs * refills descriptors in the rx chain: allocates skbs and iommu-maps them.
* and iommu-maps them.
*/ */
static void static void
spider_net_refill_rx_chain(struct spider_net_card *card) spider_net_refill_rx_chain(struct spider_net_card *card)
{ {
struct spider_net_descr_chain *chain; struct spider_net_descr_chain *chain;
int count = 0;
unsigned long flags;
chain = &card->rx_chain; chain = &card->rx_chain;
spin_lock_irqsave(&card->chain_lock, flags); /* one context doing the refill (and a second context seeing that
while (spider_net_get_descr_status(chain->head) == * and omitting it) is ok. If called by NAPI, we'll be called again
SPIDER_NET_DESCR_NOT_IN_USE) { * as spider_net_decode_one_descr is called several times. If some
if (spider_net_prepare_rx_descr(card, chain->head)) * interrupt calls us, the NAPI is about to clean up anyway. */
break; if (atomic_inc_return(&card->rx_chain_refill) == 1)
count++; while (spider_net_get_descr_status(chain->head) ==
chain->head = chain->head->next; SPIDER_NET_DESCR_NOT_IN_USE) {
} if (spider_net_prepare_rx_descr(card, chain->head))
spin_unlock_irqrestore(&card->chain_lock, flags); break;
chain->head = chain->head->next;
}
/* could be optimized, only do that, if we know the DMA processing atomic_dec(&card->rx_chain_refill);
* has terminated */
if (count)
spider_net_enable_rxdmac(card);
} }
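The rewritten refill path above replaces the chain_lock with an atomic counter: whoever bumps rx_chain_refill from 0 to 1 walks the chain, any concurrent caller simply skips the work, and every caller decrements on the way out. A user-space analogue using C11 atomics (the work inside the guarded section is elided):

```c
#include <stdatomic.h>
#include <stdio.h>

static atomic_int rx_chain_refill;

static void refill_rx_chain(void)
{
	/* atomic_fetch_add() returns the previous value, so "== 0" is the
	 * same test as the kernel's atomic_inc_return(...) == 1 */
	if (atomic_fetch_add(&rx_chain_refill, 1) == 0)
		printf("refilling the rx chain\n");	/* allocate/map buffers here */
	else
		printf("refill already running, skipping\n");

	atomic_fetch_sub(&rx_chain_refill, 1);
}

int main(void)
{
	refill_rx_chain();
	return 0;
}
```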
/** /**
...@@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card) ...@@ -613,6 +546,7 @@ spider_net_alloc_rx_skbs(struct spider_net_card *card)
/* this will allocate the rest of the rx buffers; if not, it's /* this will allocate the rest of the rx buffers; if not, it's
* business as usual later on */ * business as usual later on */
spider_net_refill_rx_chain(card); spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
return 0; return 0;
error: error:
...@@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card, ...@@ -649,24 +583,30 @@ spider_net_release_tx_descr(struct spider_net_card *card,
* @card: adapter structure * @card: adapter structure
* @brutal: if set, don't care about whether descriptor seems to be in use * @brutal: if set, don't care about whether descriptor seems to be in use
* *
* releases the tx descriptors that spider has finished with (if non-brutal) * returns 0 if the tx ring is empty, otherwise 1.
* or simply release tx descriptors (if brutal) *
* spider_net_release_tx_chain releases the tx descriptors that spider has
* finished with (if non-brutal) or simply release tx descriptors (if brutal).
* If some other context is calling this function, we return 1 so that we're
* scheduled again (if we were scheduled) and will not lose initiative.
*/ */
static void static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal) spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{ {
struct spider_net_descr_chain *tx_chain = &card->tx_chain; struct spider_net_descr_chain *tx_chain = &card->tx_chain;
enum spider_net_descr_status status; enum spider_net_descr_status status;
spider_net_tx_irq_off(card); if (atomic_inc_return(&card->tx_chain_release) != 1) {
atomic_dec(&card->tx_chain_release);
return 1;
}
/* no lock for chain needed, if this is only executed once at a time */
again:
for (;;) { for (;;) {
status = spider_net_get_descr_status(tx_chain->tail); status = spider_net_get_descr_status(tx_chain->tail);
switch (status) { switch (status) {
case SPIDER_NET_DESCR_CARDOWNED: case SPIDER_NET_DESCR_CARDOWNED:
if (!brutal) goto out; if (!brutal)
goto out;
/* fallthrough, if we release the descriptors /* fallthrough, if we release the descriptors
* brutally (then we don't care about * brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */ * SPIDER_NET_DESCR_CARDOWNED) */
...@@ -693,25 +633,30 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) ...@@ -693,25 +633,30 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
tx_chain->tail = tx_chain->tail->next; tx_chain->tail = tx_chain->tail->next;
} }
out: out:
atomic_dec(&card->tx_chain_release);
netif_wake_queue(card->netdev); netif_wake_queue(card->netdev);
if (!brutal) { if (status == SPIDER_NET_DESCR_CARDOWNED)
/* switch on tx irqs (while we are still in the interrupt return 1;
* handler, so we don't get an interrupt), check again return 0;
* for done descriptors. This results in fewer interrupts */ }
spider_net_tx_irq_on(card);
status = spider_net_get_descr_status(tx_chain->tail);
switch (status) {
case SPIDER_NET_DESCR_RESPONSE_ERROR:
case SPIDER_NET_DESCR_PROTECTION_ERROR:
case SPIDER_NET_DESCR_FORCE_END:
case SPIDER_NET_DESCR_COMPLETE:
goto again;
default:
break;
}
}
/**
* spider_net_cleanup_tx_ring - cleans up the TX ring
* @card: card structure
*
* spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
* interrupts to cleanup our TX ring) and returns sent packets to the stack
* by freeing them
*/
static void
spider_net_cleanup_tx_ring(struct spider_net_card *card)
{
if ( (spider_net_release_tx_chain(card, 0)) &&
(card->netdev->flags & IFF_UP) ) {
mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
}
} }
/** /**
...@@ -726,16 +671,22 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) ...@@ -726,16 +671,22 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
static u8 static u8
spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr) spider_net_get_multicast_hash(struct net_device *netdev, __u8 *addr)
{ {
/* FIXME: an addr of 01:00:5e:00:00:01 must result in 0xa9,
* ff:ff:ff:ff:ff:ff must result in 0xfd */
u32 crc; u32 crc;
u8 hash; u8 hash;
char addr_for_crc[ETH_ALEN] = { 0, };
int i, bit;
crc = crc32_be(~0, addr, netdev->addr_len); for (i = 0; i < ETH_ALEN * 8; i++) {
bit = (addr[i / 8] >> (i % 8)) & 1;
addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
}
crc = crc32_be(~0, addr_for_crc, netdev->addr_len);
hash = (crc >> 27); hash = (crc >> 27);
hash <<= 3; hash <<= 3;
hash |= crc & 7; hash |= crc & 7;
hash &= 0xff;
return hash; return hash;
} }
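The new spider_net_get_multicast_hash() bit- and byte-reverses the MAC address before feeding it to a big-endian CRC-32 and then builds the 8-bit hash from CRC bits 31..27 and 2..0. The stand-alone sketch below reimplements crc32_be() as a plain bit-wise CRC with polynomial 0x04c11db7 (an assumption about the kernel helper, not a copy of it) so the hash can be tried against the address mentioned in the driver's FIXME:

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint32_t crc32_be(uint32_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		crc ^= (uint32_t)*p++ << 24;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x80000000u) ?
			      (crc << 1) ^ 0x04c11db7u : crc << 1;
	}
	return crc;
}

static uint8_t multicast_hash(const uint8_t *addr)
{
	uint8_t addr_for_crc[ETH_ALEN] = { 0, };
	uint32_t crc;
	uint8_t hash;
	int i, bit;

	/* reverse bit order within each byte and the byte order itself */
	for (i = 0; i < ETH_ALEN * 8; i++) {
		bit = (addr[i / 8] >> (i % 8)) & 1;
		addr_for_crc[ETH_ALEN - 1 - i / 8] += bit << (7 - (i % 8));
	}

	crc = crc32_be(~0u, addr_for_crc, ETH_ALEN);
	hash = (crc >> 27);	/* CRC bits 31..27 ... */
	hash <<= 3;
	hash |= crc & 7;	/* ... plus CRC bits 2..0 */
	return hash;
}

int main(void)
{
	/* the driver's FIXME says this address must hash to 0xa9 */
	const uint8_t mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("hash = 0x%02x\n", multicast_hash(mc));
	return 0;
}
```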
...@@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev) ...@@ -821,9 +772,11 @@ spider_net_stop(struct net_device *netdev)
{ {
struct spider_net_card *card = netdev_priv(netdev); struct spider_net_card *card = netdev_priv(netdev);
tasklet_kill(&card->rxram_full_tl);
netif_poll_disable(netdev); netif_poll_disable(netdev);
netif_carrier_off(netdev); netif_carrier_off(netdev);
netif_stop_queue(netdev); netif_stop_queue(netdev);
del_timer_sync(&card->tx_timer);
/* disable/mask all interrupts */ /* disable/mask all interrupts */
spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0); spider_net_write_reg(card, SPIDER_NET_GHIINT0MSK, 0);
...@@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card) ...@@ -872,13 +825,15 @@ spider_net_get_next_tx_descr(struct spider_net_card *card)
* @skb: packet to consider * @skb: packet to consider
* *
* fills out the command and status field of the descriptor structure, * fills out the command and status field of the descriptor structure,
* depending on hardware checksum settings. This function assumes a wmb() * depending on hardware checksum settings.
* has executed before.
*/ */
static void static void
spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
struct sk_buff *skb) struct sk_buff *skb)
{ {
/* make sure the other fields in the descriptor are written */
wmb();
if (skb->ip_summed != CHECKSUM_HW) { if (skb->ip_summed != CHECKSUM_HW) {
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
return; return;
...@@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr, ...@@ -887,14 +842,13 @@ spider_net_set_txdescr_cmdstat(struct spider_net_descr *descr,
/* is packet ip? /* is packet ip?
* if yes: tcp? udp? */ * if yes: tcp? udp? */
if (skb->protocol == htons(ETH_P_IP)) { if (skb->protocol == htons(ETH_P_IP)) {
if (skb->nh.iph->protocol == IPPROTO_TCP) { if (skb->nh.iph->protocol == IPPROTO_TCP)
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS; descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_TCPCS;
} else if (skb->nh.iph->protocol == IPPROTO_UDP) { else if (skb->nh.iph->protocol == IPPROTO_UDP)
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS; descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_UDPCS;
} else { /* the stack should checksum non-tcp and non-udp else /* the stack should checksum non-tcp and non-udp
packets on his own: NETIF_F_IP_CSUM */ packets on his own: NETIF_F_IP_CSUM */
descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS; descr->dmac_cmd_status = SPIDER_NET_DMAC_CMDSTAT_NOCS;
}
} }
} }
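The hunks above move the wmb() into spider_net_set_txdescr_cmdstat() itself, so the descriptor's address and length are visible before the command/status word that hands it to the card. In portable user-space terms the same publish pattern is a release store; the illustrative sketch below models the status word with a C11 atomic:

```c
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct descr {
	uint32_t buf_addr;
	uint32_t buf_size;
	_Atomic uint32_t cmd_status;
};

/* fill the payload fields first, then publish the status with release
 * ordering, which plays the role of wmb() followed by the plain store */
static void publish(struct descr *d, uint32_t addr, uint32_t size, uint32_t status)
{
	d->buf_addr = addr;
	d->buf_size = size;
	atomic_store_explicit(&d->cmd_status, status, memory_order_release);
}

int main(void)
{
	struct descr d = { 0 };

	publish(&d, 0x1000, 1514, 0xa0000000u /* pretend: card-owned */);
	printf("status=0x%x\n", (unsigned)atomic_load(&d.cmd_status));
	return 0;
}
```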
...@@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, ...@@ -914,23 +868,22 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
struct spider_net_descr *descr, struct spider_net_descr *descr,
struct sk_buff *skb) struct sk_buff *skb)
{ {
descr->buf_addr = pci_map_single(card->pdev, skb->data, dma_addr_t buf;
skb->len, PCI_DMA_BIDIRECTIONAL);
if (descr->buf_addr == DMA_ERROR_CODE) { buf = pci_map_single(card->pdev, skb->data,
if (netif_msg_tx_err(card)) skb->len, PCI_DMA_BIDIRECTIONAL);
if (buf == DMA_ERROR_CODE) {
if (netif_msg_tx_err(card) && net_ratelimit())
pr_err("could not iommu-map packet (%p, %i). " pr_err("could not iommu-map packet (%p, %i). "
"Dropping packet\n", skb->data, skb->len); "Dropping packet\n", skb->data, skb->len);
return -ENOMEM; return -ENOMEM;
} }
descr->buf_addr = buf;
descr->buf_size = skb->len; descr->buf_size = skb->len;
descr->skb = skb; descr->skb = skb;
descr->data_status = 0; descr->data_status = 0;
/* make sure the above values are in memory before we change the
* status */
wmb();
spider_net_set_txdescr_cmdstat(descr,skb); spider_net_set_txdescr_cmdstat(descr,skb);
return 0; return 0;
...@@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -972,17 +925,12 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
struct spider_net_descr *descr; struct spider_net_descr *descr;
int result; int result;
descr = spider_net_get_next_tx_descr(card); spider_net_release_tx_chain(card, 0);
if (!descr) { descr = spider_net_get_next_tx_descr(card);
netif_stop_queue(netdev);
descr = spider_net_get_next_tx_descr(card); if (!descr)
if (!descr) goto error;
goto error;
else
netif_start_queue(netdev);
}
result = spider_net_prepare_tx_descr(card, descr, skb); result = spider_net_prepare_tx_descr(card, descr, skb);
if (result) if (result)
...@@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev) ...@@ -990,19 +938,25 @@ spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
card->tx_chain.head = card->tx_chain.head->next; card->tx_chain.head = card->tx_chain.head->next;
/* make sure the status from spider_net_prepare_tx_descr is in
* memory before we check out the previous descriptor */
wmb();
if (spider_net_get_descr_status(descr->prev) != if (spider_net_get_descr_status(descr->prev) !=
SPIDER_NET_DESCR_CARDOWNED) SPIDER_NET_DESCR_CARDOWNED) {
spider_net_kick_tx_dma(card, descr); /* make sure the current descriptor is in memory. Then
* kicking it on again makes sense, if the previous is not
* card-owned anymore. Check the previous descriptor twice
* to omit an mb() in heavy traffic cases */
mb();
if (spider_net_get_descr_status(descr->prev) !=
SPIDER_NET_DESCR_CARDOWNED)
spider_net_kick_tx_dma(card, descr);
}
mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);
return NETDEV_TX_OK; return NETDEV_TX_OK;
error: error:
card->netdev_stats.tx_dropped++; card->netdev_stats.tx_dropped++;
return NETDEV_TX_LOCKED; return NETDEV_TX_BUSY;
} }
/** /**
...@@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ...@@ -1027,6 +981,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
* spider_net_pass_skb_up - takes an skb from a descriptor and passes it on * spider_net_pass_skb_up - takes an skb from a descriptor and passes it on
* @descr: descriptor to process * @descr: descriptor to process
* @card: card structure * @card: card structure
* @napi: whether caller is in NAPI context
* *
* returns 1 on success, 0 if no packet was passed to the stack * returns 1 on success, 0 if no packet was passed to the stack
* *
...@@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ...@@ -1035,7 +990,7 @@ spider_net_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
*/ */
static int static int
spider_net_pass_skb_up(struct spider_net_descr *descr, spider_net_pass_skb_up(struct spider_net_descr *descr,
struct spider_net_card *card) struct spider_net_card *card, int napi)
{ {
struct sk_buff *skb; struct sk_buff *skb;
struct net_device *netdev; struct net_device *netdev;
...@@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, ...@@ -1046,22 +1001,20 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
netdev = card->netdev; netdev = card->netdev;
/* check for errors in the data_error flag */ /* unmap descriptor */
if ((data_error & SPIDER_NET_DATA_ERROR_MASK) && pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_FRAME,
netif_msg_rx_err(card))
pr_err("error in received descriptor found, "
"data_status=x%08x, data_error=x%08x\n",
data_status, data_error);
/* prepare skb, unmap descriptor */
skb = descr->skb;
pci_unmap_single(card->pdev, descr->buf_addr, SPIDER_NET_MAX_MTU,
PCI_DMA_BIDIRECTIONAL); PCI_DMA_BIDIRECTIONAL);
/* the cases we'll throw away the packet immediately */ /* the cases we'll throw away the packet immediately */
if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) if (data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
if (netif_msg_rx_err(card))
pr_err("error in received descriptor found, "
"data_status=x%08x, data_error=x%08x\n",
data_status, data_error);
return 0; return 0;
}
skb = descr->skb;
skb->dev = netdev; skb->dev = netdev;
skb_put(skb, descr->valid_size); skb_put(skb, descr->valid_size);
...@@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, ...@@ -1073,14 +1026,14 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
/* checksum offload */ /* checksum offload */
if (card->options.rx_csum) { if (card->options.rx_csum) {
if ( (data_status & SPIDER_NET_DATA_STATUS_CHK_MASK) && if ( ( (data_status & SPIDER_NET_DATA_STATUS_CKSUM_MASK) ==
(!(data_error & SPIDER_NET_DATA_ERROR_CHK_MASK)) ) SPIDER_NET_DATA_STATUS_CKSUM_MASK) &&
!(data_error & SPIDER_NET_DATA_ERR_CKSUM_MASK))
skb->ip_summed = CHECKSUM_UNNECESSARY; skb->ip_summed = CHECKSUM_UNNECESSARY;
else else
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
} else { } else
skb->ip_summed = CHECKSUM_NONE; skb->ip_summed = CHECKSUM_NONE;
}
if (data_status & SPIDER_NET_VLAN_PACKET) { if (data_status & SPIDER_NET_VLAN_PACKET) {
/* further enhancements: HW-accel VLAN /* further enhancements: HW-accel VLAN
...@@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, ...@@ -1089,7 +1042,10 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
} }
/* pass skb up to stack */ /* pass skb up to stack */
netif_receive_skb(skb); if (napi)
netif_receive_skb(skb);
else
netif_rx_ni(skb);
/* update netdevice statistics */ /* update netdevice statistics */
card->netdev_stats.rx_packets++; card->netdev_stats.rx_packets++;
...@@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr, ...@@ -1099,16 +1055,18 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
} }
/** /**
* spider_net_decode_descr - processes an rx descriptor * spider_net_decode_one_descr - processes an rx descriptor
* @card: card structure * @card: card structure
* @napi: whether caller is in NAPI context
* *
* returns 1 if a packet has been sent to the stack, otherwise 0 * returns 1 if a packet has been sent to the stack, otherwise 0
* *
* processes an rx descriptor by iommu-unmapping the data buffer and passing * processes an rx descriptor by iommu-unmapping the data buffer and passing
* the packet up to the stack * the packet up to the stack. This function is called in softirq
* context, e.g. either bottom half from interrupt or NAPI polling context
*/ */
static int static int
spider_net_decode_one_descr(struct spider_net_card *card) spider_net_decode_one_descr(struct spider_net_card *card, int napi)
{ {
enum spider_net_descr_status status; enum spider_net_descr_status status;
struct spider_net_descr *descr; struct spider_net_descr *descr;
...@@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card) ...@@ -1122,17 +1080,19 @@ spider_net_decode_one_descr(struct spider_net_card *card)
if (status == SPIDER_NET_DESCR_CARDOWNED) { if (status == SPIDER_NET_DESCR_CARDOWNED) {
/* nothing in the descriptor yet */ /* nothing in the descriptor yet */
return 0; result=0;
goto out;
} }
if (status == SPIDER_NET_DESCR_NOT_IN_USE) { if (status == SPIDER_NET_DESCR_NOT_IN_USE) {
/* not initialized yet, I bet chain->tail == chain->head /* not initialized yet, the ring must be empty */
* and the ring is empty */
spider_net_refill_rx_chain(card); spider_net_refill_rx_chain(card);
return 0; spider_net_enable_rxdmac(card);
result=0;
goto out;
} }
/* descriptor definitively used -- move on head */ /* descriptor definitively used -- move on tail */
chain->tail = descr->next; chain->tail = descr->next;
result = 0; result = 0;
...@@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card) ...@@ -1143,6 +1103,9 @@ spider_net_decode_one_descr(struct spider_net_card *card)
pr_err("%s: dropping RX descriptor with state %d\n", pr_err("%s: dropping RX descriptor with state %d\n",
card->netdev->name, status); card->netdev->name, status);
card->netdev_stats.rx_dropped++; card->netdev_stats.rx_dropped++;
pci_unmap_single(card->pdev, descr->buf_addr,
SPIDER_NET_MAX_FRAME, PCI_DMA_BIDIRECTIONAL);
dev_kfree_skb_irq(descr->skb);
goto refill; goto refill;
} }
...@@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card) ...@@ -1155,12 +1118,13 @@ spider_net_decode_one_descr(struct spider_net_card *card)
} }
/* ok, we've got a packet in descr */ /* ok, we've got a packet in descr */
result = spider_net_pass_skb_up(descr, card); result = spider_net_pass_skb_up(descr, card, napi);
refill: refill:
spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE); spider_net_set_descr_status(descr, SPIDER_NET_DESCR_NOT_IN_USE);
/* change the descriptor state: */ /* change the descriptor state: */
spider_net_refill_rx_chain(card); if (!napi)
spider_net_refill_rx_chain(card);
out:
return result; return result;
} }
...@@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget) ...@@ -1186,7 +1150,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
packets_to_do = min(*budget, netdev->quota); packets_to_do = min(*budget, netdev->quota);
while (packets_to_do) { while (packets_to_do) {
if (spider_net_decode_one_descr(card)) { if (spider_net_decode_one_descr(card, 1)) {
packets_done++; packets_done++;
packets_to_do--; packets_to_do--;
} else { } else {
...@@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget) ...@@ -1198,6 +1162,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
netdev->quota -= packets_done; netdev->quota -= packets_done;
*budget -= packets_done; *budget -= packets_done;
spider_net_refill_rx_chain(card);
/* if all packets are in the stack, enable interrupts and return 0 */ /* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */ /* if not, return 1 */
...@@ -1341,6 +1306,24 @@ spider_net_enable_txdmac(struct spider_net_card *card) ...@@ -1341,6 +1306,24 @@ spider_net_enable_txdmac(struct spider_net_card *card)
card->tx_chain.tail->bus_addr); card->tx_chain.tail->bus_addr);
} }
/**
* spider_net_handle_rxram_full - cleans up RX ring upon RX RAM full interrupt
* @card: card structure
*
* spider_net_handle_rxram_full empties the RX ring so that spider can put
* more packets in it and empty its RX RAM. This is called in bottom half
* context
*/
static void
spider_net_handle_rxram_full(struct spider_net_card *card)
{
while (spider_net_decode_one_descr(card, 0))
;
spider_net_enable_rxchtails(card);
spider_net_enable_rxdmac(card);
netif_rx_schedule(card->netdev);
}
/** /**
* spider_net_handle_error_irq - handles errors raised by an interrupt * spider_net_handle_error_irq - handles errors raised by an interrupt
* @card: card structure * @card: card structure
...@@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) ...@@ -1449,17 +1432,21 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
switch (i) switch (i)
{ {
case SPIDER_NET_GTMFLLINT: case SPIDER_NET_GTMFLLINT:
if (netif_msg_intr(card)) if (netif_msg_intr(card) && net_ratelimit())
pr_err("Spider TX RAM full\n"); pr_err("Spider TX RAM full\n");
show_error = 0; show_error = 0;
break; break;
case SPIDER_NET_GRFDFLLINT: /* fallthrough */
case SPIDER_NET_GRFCFLLINT: /* fallthrough */
case SPIDER_NET_GRFBFLLINT: /* fallthrough */
case SPIDER_NET_GRFAFLLINT: /* fallthrough */
case SPIDER_NET_GRMFLLINT: case SPIDER_NET_GRMFLLINT:
if (netif_msg_intr(card)) if (netif_msg_intr(card) && net_ratelimit())
pr_err("Spider RX RAM full, incoming packets " pr_err("Spider RX RAM full, incoming packets "
"might be discarded !\n"); "might be discarded!\n");
netif_rx_schedule(card->netdev); spider_net_rx_irq_off(card);
spider_net_enable_rxchtails(card); tasklet_schedule(&card->rxram_full_tl);
spider_net_enable_rxdmac(card); show_error = 0;
break; break;
/* case SPIDER_NET_GTMSHTINT: problem, print a message */ /* case SPIDER_NET_GTMSHTINT: problem, print a message */
...@@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) ...@@ -1467,10 +1454,6 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
/* allrighty. tx from previous descr ok */ /* allrighty. tx from previous descr ok */
show_error = 0; show_error = 0;
break; break;
/* case SPIDER_NET_GRFDFLLINT: print a message down there */
/* case SPIDER_NET_GRFCFLLINT: print a message down there */
/* case SPIDER_NET_GRFBFLLINT: print a message down there */
/* case SPIDER_NET_GRFAFLLINT: print a message down there */
/* chain end */ /* chain end */
case SPIDER_NET_GDDDCEINT: /* fallthrough */ case SPIDER_NET_GDDDCEINT: /* fallthrough */
...@@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) ...@@ -1482,6 +1465,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
"restarting DMAC %c.\n", "restarting DMAC %c.\n",
'D'+i-SPIDER_NET_GDDDCEINT); 'D'+i-SPIDER_NET_GDDDCEINT);
spider_net_refill_rx_chain(card); spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
show_error = 0; show_error = 0;
break; break;
...@@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg) ...@@ -1492,6 +1476,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
case SPIDER_NET_GDAINVDINT: case SPIDER_NET_GDAINVDINT:
/* could happen when rx chain is full */ /* could happen when rx chain is full */
spider_net_refill_rx_chain(card); spider_net_refill_rx_chain(card);
spider_net_enable_rxdmac(card);
show_error = 0; show_error = 0;
break; break;
...@@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs) ...@@ -1580,17 +1565,13 @@ spider_net_interrupt(int irq, void *ptr, struct pt_regs *regs)
if (!status_reg) if (!status_reg)
return IRQ_NONE; return IRQ_NONE;
if (status_reg & SPIDER_NET_TXINT)
spider_net_release_tx_chain(card, 0);
if (status_reg & SPIDER_NET_RXINT ) { if (status_reg & SPIDER_NET_RXINT ) {
spider_net_rx_irq_off(card); spider_net_rx_irq_off(card);
netif_rx_schedule(netdev); netif_rx_schedule(netdev);
} }
/* we do this after rx and tx processing, as we want the tx chain if (status_reg & SPIDER_NET_ERRINT )
* processed to see, whether we should restart tx dma processing */ spider_net_handle_error_irq(card, status_reg);
spider_net_handle_error_irq(card, status_reg);
/* clear interrupt sources */ /* clear interrupt sources */
spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg); spider_net_write_reg(card, SPIDER_NET_GHIINT0STS, status_reg);
...@@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card) ...@@ -1831,34 +1812,40 @@ spider_net_setup_phy(struct spider_net_card *card)
/** /**
* spider_net_download_firmware - loads firmware into the adapter * spider_net_download_firmware - loads firmware into the adapter
* @card: card structure * @card: card structure
* @firmware: firmware pointer * @firmware_ptr: pointer to firmware data
* *
* spider_net_download_firmware loads the firmware opened by * spider_net_download_firmware loads the firmware data into the
* spider_net_init_firmware into the adapter. * adapter. It assumes the length etc. to be allright.
*/ */
static void static int
spider_net_download_firmware(struct spider_net_card *card, spider_net_download_firmware(struct spider_net_card *card,
const struct firmware *firmware) u8 *firmware_ptr)
{ {
int sequencer, i; int sequencer, i;
u32 *fw_ptr = (u32 *)firmware->data; u32 *fw_ptr = (u32 *)firmware_ptr;
/* stop sequencers */ /* stop sequencers */
spider_net_write_reg(card, SPIDER_NET_GSINIT, spider_net_write_reg(card, SPIDER_NET_GSINIT,
SPIDER_NET_STOP_SEQ_VALUE); SPIDER_NET_STOP_SEQ_VALUE);
for (sequencer = 0; sequencer < 6; sequencer++) { for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
sequencer++) {
spider_net_write_reg(card, spider_net_write_reg(card,
SPIDER_NET_GSnPRGADR + sequencer * 8, 0); SPIDER_NET_GSnPRGADR + sequencer * 8, 0);
for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
sequencer * 8, *fw_ptr); sequencer * 8, *fw_ptr);
fw_ptr++; fw_ptr++;
} }
} }
if (spider_net_read_reg(card, SPIDER_NET_GSINIT))
return -EIO;
spider_net_write_reg(card, SPIDER_NET_GSINIT, spider_net_write_reg(card, SPIDER_NET_GSINIT,
SPIDER_NET_RUN_SEQ_VALUE); SPIDER_NET_RUN_SEQ_VALUE);
return 0;
} }
/** /**
...@@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card, ...@@ -1890,31 +1877,53 @@ spider_net_download_firmware(struct spider_net_card *card,
static int static int
spider_net_init_firmware(struct spider_net_card *card) spider_net_init_firmware(struct spider_net_card *card)
{ {
const struct firmware *firmware; struct firmware *firmware = NULL;
int err = -EIO; struct device_node *dn;
u8 *fw_prop = NULL;
int err = -ENOENT;
int fw_size;
if (request_firmware((const struct firmware **)&firmware,
SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
netif_msg_probe(card) ) {
pr_err("Incorrect size of spidernet firmware in " \
"filesystem. Looking in host firmware...\n");
goto try_host_fw;
}
err = spider_net_download_firmware(card, firmware->data);
if (request_firmware(&firmware, release_firmware(firmware);
SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) < 0) { if (err)
if (netif_msg_probe(card)) goto try_host_fw;
pr_err("Couldn't read in sequencer data file %s.\n",
SPIDER_NET_FIRMWARE_NAME);
firmware = NULL;
goto out;
}
if (firmware->size != 6 * SPIDER_NET_FIRMWARE_LEN * sizeof(u32)) { goto done;
if (netif_msg_probe(card))
pr_err("Invalid size of sequencer data file %s.\n",
SPIDER_NET_FIRMWARE_NAME);
goto out;
} }
spider_net_download_firmware(card, firmware); try_host_fw:
dn = pci_device_to_OF_node(card->pdev);
if (!dn)
goto out_err;
err = 0; fw_prop = (u8 *)get_property(dn, "firmware", &fw_size);
out: if (!fw_prop)
release_firmware(firmware); goto out_err;
if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
netif_msg_probe(card) ) {
pr_err("Incorrect size of spidernet firmware in " \
"host firmware\n");
goto done;
}
err = spider_net_download_firmware(card, fw_prop);
done:
return err;
out_err:
if (netif_msg_probe(card))
pr_err("Couldn't find spidernet firmware in filesystem " \
"or host firmware\n");
return err; return err;
} }
...@@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card) ...@@ -1934,10 +1943,11 @@ spider_net_workaround_rxramfull(struct spider_net_card *card)
SPIDER_NET_CKRCTRL_RUN_VALUE); SPIDER_NET_CKRCTRL_RUN_VALUE);
/* empty sequencer data */ /* empty sequencer data */
for (sequencer = 0; sequencer < 6; sequencer++) { for (sequencer = 0; sequencer < SPIDER_NET_FIRMWARE_SEQS;
sequencer++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
sequencer * 8, 0x0); sequencer * 8, 0x0);
for (i = 0; i < SPIDER_NET_FIRMWARE_LEN; i++) { for (i = 0; i < SPIDER_NET_FIRMWARE_SEQWORDS; i++) {
spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT + spider_net_write_reg(card, SPIDER_NET_GSnPRGDAT +
sequencer * 8, 0x0); sequencer * 8, 0x0);
} }
...@@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card) ...@@ -2061,7 +2071,15 @@ spider_net_setup_netdev(struct spider_net_card *card)
SET_NETDEV_DEV(netdev, &card->pdev->dev); SET_NETDEV_DEV(netdev, &card->pdev->dev);
pci_set_drvdata(card->pdev, netdev); pci_set_drvdata(card->pdev, netdev);
spin_lock_init(&card->intmask_lock);
atomic_set(&card->tx_chain_release,0);
card->rxram_full_tl.data = (unsigned long) card;
card->rxram_full_tl.func =
(void (*)(unsigned long)) spider_net_handle_rxram_full;
init_timer(&card->tx_timer);
card->tx_timer.function =
(void (*)(unsigned long)) spider_net_cleanup_tx_ring;
card->tx_timer.data = (unsigned long) card;
netdev->irq = card->pdev->irq; netdev->irq = card->pdev->irq;
card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT; card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
......
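[Editor's note] The setup_netdev hunk above wires the new RX-RAM-full tasklet and the TX-cleanup timer by assigning the struct members directly. As a point of reference only (not part of the patch), the same wiring can be expressed with tasklet_init(); how the timer is re-armed is not shown in this hunk, so the mod_timer() line below is an assumption based on the SPIDER_NET_TX_TIMER interval defined in the header diff that follows. A minimal sketch under those assumptions:

    /* illustrative only -- equivalent to the open-coded assignments above */
    tasklet_init(&card->rxram_full_tl,
                 (void (*)(unsigned long)) spider_net_handle_rxram_full,
                 (unsigned long) card);

    init_timer(&card->tx_timer);
    card->tx_timer.function =
        (void (*)(unsigned long)) spider_net_cleanup_tx_ring;
    card->tx_timer.data = (unsigned long) card;

    /* assumed re-arm pattern when TX work is pending (not shown in this hunk) */
    mod_timer(&card->tx_timer, jiffies + SPIDER_NET_TX_TIMER);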
...@@ -33,25 +33,32 @@ extern struct ethtool_ops spider_net_ethtool_ops; ...@@ -33,25 +33,32 @@ extern struct ethtool_ops spider_net_ethtool_ops;
extern char spider_net_driver_name[]; extern char spider_net_driver_name[];
#define SPIDER_NET_MAX_MTU 2308 #define SPIDER_NET_MAX_FRAME 2312
#define SPIDER_NET_MAX_MTU 2294
#define SPIDER_NET_MIN_MTU 64 #define SPIDER_NET_MIN_MTU 64
#define SPIDER_NET_RXBUF_ALIGN 128 #define SPIDER_NET_RXBUF_ALIGN 128
#define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 64 #define SPIDER_NET_RX_DESCRIPTORS_DEFAULT 256
#define SPIDER_NET_RX_DESCRIPTORS_MIN 16 #define SPIDER_NET_RX_DESCRIPTORS_MIN 16
#define SPIDER_NET_RX_DESCRIPTORS_MAX 256 #define SPIDER_NET_RX_DESCRIPTORS_MAX 512
#define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 64 #define SPIDER_NET_TX_DESCRIPTORS_DEFAULT 256
#define SPIDER_NET_TX_DESCRIPTORS_MIN 16 #define SPIDER_NET_TX_DESCRIPTORS_MIN 16
#define SPIDER_NET_TX_DESCRIPTORS_MAX 256 #define SPIDER_NET_TX_DESCRIPTORS_MAX 512
#define SPIDER_NET_TX_TIMER 20
#define SPIDER_NET_RX_CSUM_DEFAULT 1 #define SPIDER_NET_RX_CSUM_DEFAULT 1
#define SPIDER_NET_WATCHDOG_TIMEOUT 5*HZ #define SPIDER_NET_WATCHDOG_TIMEOUT 50*HZ
#define SPIDER_NET_NAPI_WEIGHT 64 #define SPIDER_NET_NAPI_WEIGHT 64
#define SPIDER_NET_FIRMWARE_LEN 1024 #define SPIDER_NET_FIRMWARE_SEQS 6
#define SPIDER_NET_FIRMWARE_SEQWORDS 1024
#define SPIDER_NET_FIRMWARE_LEN (SPIDER_NET_FIRMWARE_SEQS * \
SPIDER_NET_FIRMWARE_SEQWORDS * \
sizeof(u32))
#define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin" #define SPIDER_NET_FIRMWARE_NAME "spider_fw.bin"
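[Editor's note] With SPIDER_NET_FIRMWARE_SEQS = 6 and SPIDER_NET_FIRMWARE_SEQWORDS = 1024, the derived SPIDER_NET_FIRMWARE_LEN works out to 6 * 1024 * 4 = 24576 bytes on the usual 4-byte u32, which is the size spider_net_init_firmware now checks both the filesystem image and the host "firmware" property against. A compile-time sanity check one could drop into the probe path, as a sketch only:

    /* illustrative, not part of the patch; assumes a 4-byte u32 */
    BUILD_BUG_ON(SPIDER_NET_FIRMWARE_LEN != 6 * 1024 * sizeof(u32)); /* 24576 */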
/** spider_net SMMIO registers */ /** spider_net SMMIO registers */
...@@ -142,14 +149,12 @@ extern char spider_net_driver_name[]; ...@@ -142,14 +149,12 @@ extern char spider_net_driver_name[];
/** SCONFIG registers */ /** SCONFIG registers */
#define SPIDER_NET_SCONFIG_IOACTE 0x00002810 #define SPIDER_NET_SCONFIG_IOACTE 0x00002810
/** hardcoded register values */ /** interrupt mask registers */
#define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe3ff #define SPIDER_NET_INT0_MASK_VALUE 0x3f7fe2c7
#define SPIDER_NET_INT1_MASK_VALUE 0xffffffff #define SPIDER_NET_INT1_MASK_VALUE 0xffff7ff7
/* no MAC aborts -> auto retransmission */ /* no MAC aborts -> auto retransmission */
#define SPIDER_NET_INT2_MASK_VALUE 0xfffffff1 #define SPIDER_NET_INT2_MASK_VALUE 0xffef7ff1
/* clear counter when interrupt sources are cleared
#define SPIDER_NET_FRAMENUM_VALUE 0x0001f001 */
/* we rely on flagged descriptor interrupts */ /* we rely on flagged descriptor interrupts */
#define SPIDER_NET_FRAMENUM_VALUE 0x00000000 #define SPIDER_NET_FRAMENUM_VALUE 0x00000000
/* set this first, then the FRAMENUM_VALUE */ /* set this first, then the FRAMENUM_VALUE */
...@@ -168,7 +173,7 @@ extern char spider_net_driver_name[]; ...@@ -168,7 +173,7 @@ extern char spider_net_driver_name[];
#if 0 #if 0
#define SPIDER_NET_WOL_VALUE 0x00000000 #define SPIDER_NET_WOL_VALUE 0x00000000
#endif #endif
#define SPIDER_NET_IPSECINIT_VALUE 0x00f000f8 #define SPIDER_NET_IPSECINIT_VALUE 0x6f716f71
/* pause frames: automatic, no upper retransmission count */ /* pause frames: automatic, no upper retransmission count */
/* outside loopback mode: ETOMOD signal dont matter, not connected */ /* outside loopback mode: ETOMOD signal dont matter, not connected */
...@@ -318,6 +323,10 @@ enum spider_net_int2_status { ...@@ -318,6 +323,10 @@ enum spider_net_int2_status {
#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \ #define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
(1 << SPIDER_NET_GRMFLLINT) ) (1 << SPIDER_NET_GRMFLLINT) )
#define SPIDER_NET_ERRINT ( 0xffffffff & \
(~SPIDER_NET_TXINT) & \
(~SPIDER_NET_RXINT) )
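[Editor's note] SPIDER_NET_ERRINT is simply "every interrupt bit that is neither a TX nor an RX source", which is what lets the interrupt-handler hunk above call spider_net_handle_error_irq() only for genuine error bits. By De Morgan the expression collapses to a single complement; a hypothetical equivalent spelling (SPIDER_NET_ERRINT_ALT is not a name used by the patch):

    /* illustrative only: 0xffffffff & ~TX & ~RX == ~(TX | RX) */
    #define SPIDER_NET_ERRINT_ALT ( ~(SPIDER_NET_TXINT | SPIDER_NET_RXINT) )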
#define SPIDER_NET_GPREXEC 0x80000000 #define SPIDER_NET_GPREXEC 0x80000000
#define SPIDER_NET_GPRDAT_MASK 0x0000ffff #define SPIDER_NET_GPRDAT_MASK 0x0000ffff
...@@ -358,9 +367,6 @@ enum spider_net_int2_status { ...@@ -358,9 +367,6 @@ enum spider_net_int2_status {
/* descr ready, descr is in middle of chain, get interrupt on completion */ /* descr ready, descr is in middle of chain, get interrupt on completion */
#define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000 #define SPIDER_NET_DMAC_RX_CARDOWNED 0xa0800000
/* multicast is no problem */
#define SPIDER_NET_DATA_ERROR_MASK 0xffffbfff
enum spider_net_descr_status { enum spider_net_descr_status {
SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */ SPIDER_NET_DESCR_COMPLETE = 0x00, /* used in rx and tx */
SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */ SPIDER_NET_DESCR_RESPONSE_ERROR = 0x01, /* used in rx and tx */
...@@ -373,9 +379,9 @@ enum spider_net_descr_status { ...@@ -373,9 +379,9 @@ enum spider_net_descr_status {
struct spider_net_descr { struct spider_net_descr {
/* as defined by the hardware */ /* as defined by the hardware */
dma_addr_t buf_addr; u32 buf_addr;
u32 buf_size; u32 buf_size;
dma_addr_t next_descr_addr; u32 next_descr_addr;
u32 dmac_cmd_status; u32 dmac_cmd_status;
u32 result_size; u32 result_size;
u32 valid_size; /* all zeroes for tx */ u32 valid_size; /* all zeroes for tx */
...@@ -384,7 +390,7 @@ struct spider_net_descr { ...@@ -384,7 +390,7 @@ struct spider_net_descr {
/* used in the driver */ /* used in the driver */
struct sk_buff *skb; struct sk_buff *skb;
dma_addr_t bus_addr; u32 bus_addr;
struct spider_net_descr *next; struct spider_net_descr *next;
struct spider_net_descr *prev; struct spider_net_descr *prev;
} __attribute__((aligned(32))); } __attribute__((aligned(32)));
...@@ -396,21 +402,21 @@ struct spider_net_descr_chain { ...@@ -396,21 +402,21 @@ struct spider_net_descr_chain {
}; };
/* descriptor data_status bits */ /* descriptor data_status bits */
#define SPIDER_NET_RXIPCHK 29 #define SPIDER_NET_RX_IPCHK 29
#define SPIDER_NET_TCPUDPIPCHK 28 #define SPIDER_NET_RX_TCPCHK 28
#define SPIDER_NET_DATA_STATUS_CHK_MASK (1 << SPIDER_NET_RXIPCHK | \
1 << SPIDER_NET_TCPUDPIPCHK)
#define SPIDER_NET_VLAN_PACKET 21 #define SPIDER_NET_VLAN_PACKET 21
#define SPIDER_NET_DATA_STATUS_CKSUM_MASK ( (1 << SPIDER_NET_RX_IPCHK) | \
(1 << SPIDER_NET_RX_TCPCHK) )
/* descriptor data_error bits */ /* descriptor data_error bits */
#define SPIDER_NET_RXIPCHKERR 27 #define SPIDER_NET_RX_IPCHKERR 27
#define SPIDER_NET_RXTCPCHKERR 26 #define SPIDER_NET_RX_RXTCPCHKERR 28
#define SPIDER_NET_DATA_ERROR_CHK_MASK (1 << SPIDER_NET_RXIPCHKERR | \
1 << SPIDER_NET_RXTCPCHKERR) #define SPIDER_NET_DATA_ERR_CKSUM_MASK (1 << SPIDER_NET_RX_IPCHKERR)
/* the cases we don't pass the packet to the stack */ /* the cases we don't pass the packet to the stack.
#define SPIDER_NET_DESTROY_RX_FLAGS 0x70138000 * 701b8000 would be correct, but every packets gets that flag */
#define SPIDER_NET_DESTROY_RX_FLAGS 0x700b8000
#define SPIDER_NET_DESCR_SIZE 32 #define SPIDER_NET_DESCR_SIZE 32
...@@ -445,13 +451,16 @@ struct spider_net_card { ...@@ -445,13 +451,16 @@ struct spider_net_card {
struct spider_net_descr_chain tx_chain; struct spider_net_descr_chain tx_chain;
struct spider_net_descr_chain rx_chain; struct spider_net_descr_chain rx_chain;
spinlock_t chain_lock; atomic_t rx_chain_refill;
atomic_t tx_chain_release;
struct net_device_stats netdev_stats; struct net_device_stats netdev_stats;
struct spider_net_options options; struct spider_net_options options;
spinlock_t intmask_lock; spinlock_t intmask_lock;
struct tasklet_struct rxram_full_tl;
struct timer_list tx_timer;
struct work_struct tx_timeout_task; struct work_struct tx_timeout_task;
atomic_t tx_timeout_task_counter; atomic_t tx_timeout_task_counter;
......
...@@ -113,6 +113,23 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n) ...@@ -113,6 +113,23 @@ spider_net_ethtool_set_rx_csum(struct net_device *netdev, u32 n)
return 0; return 0;
} }
static uint32_t
spider_net_ethtool_get_tx_csum(struct net_device *netdev)
{
return (netdev->features & NETIF_F_HW_CSUM) != 0;
}
static int
spider_net_ethtool_set_tx_csum(struct net_device *netdev, uint32_t data)
{
if (data)
netdev->features |= NETIF_F_HW_CSUM;
else
netdev->features &= ~NETIF_F_HW_CSUM;
return 0;
}
struct ethtool_ops spider_net_ethtool_ops = { struct ethtool_ops spider_net_ethtool_ops = {
.get_settings = spider_net_ethtool_get_settings, .get_settings = spider_net_ethtool_get_settings,
.get_drvinfo = spider_net_ethtool_get_drvinfo, .get_drvinfo = spider_net_ethtool_get_drvinfo,
...@@ -122,5 +139,7 @@ struct ethtool_ops spider_net_ethtool_ops = { ...@@ -122,5 +139,7 @@ struct ethtool_ops spider_net_ethtool_ops = {
.nway_reset = spider_net_ethtool_nway_reset, .nway_reset = spider_net_ethtool_nway_reset,
.get_rx_csum = spider_net_ethtool_get_rx_csum, .get_rx_csum = spider_net_ethtool_get_rx_csum,
.set_rx_csum = spider_net_ethtool_set_rx_csum, .set_rx_csum = spider_net_ethtool_set_rx_csum,
.get_tx_csum = spider_net_ethtool_get_tx_csum,
.set_tx_csum = spider_net_ethtool_set_tx_csum,
}; };
...@@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev, ...@@ -5668,13 +5668,13 @@ static int airo_set_freq(struct net_device *dev,
int channel = fwrq->m; int channel = fwrq->m;
/* We should do a better check than that, /* We should do a better check than that,
* based on the card capability !!! */ * based on the card capability !!! */
if((channel < 1) || (channel > 16)) { if((channel < 1) || (channel > 14)) {
printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m); printk(KERN_DEBUG "%s: New channel value of %d is invalid!\n", dev->name, fwrq->m);
rc = -EINVAL; rc = -EINVAL;
} else { } else {
readConfigRid(local, 1); readConfigRid(local, 1);
/* Yes ! We can set it !!! */ /* Yes ! We can set it !!! */
local->config.channelSet = (u16)(channel - 1); local->config.channelSet = (u16) channel;
set_bit (FLAG_COMMIT, &local->flags); set_bit (FLAG_COMMIT, &local->flags);
} }
} }
...@@ -5692,6 +5692,7 @@ static int airo_get_freq(struct net_device *dev, ...@@ -5692,6 +5692,7 @@ static int airo_get_freq(struct net_device *dev,
{ {
struct airo_info *local = dev->priv; struct airo_info *local = dev->priv;
StatusRid status_rid; /* Card status info */ StatusRid status_rid; /* Card status info */
int ch;
readConfigRid(local, 1); readConfigRid(local, 1);
if ((local->config.opmode & 0xFF) == MODE_STA_ESS) if ((local->config.opmode & 0xFF) == MODE_STA_ESS)
...@@ -5699,16 +5700,14 @@ static int airo_get_freq(struct net_device *dev, ...@@ -5699,16 +5700,14 @@ static int airo_get_freq(struct net_device *dev,
else else
readStatusRid(local, &status_rid, 1); readStatusRid(local, &status_rid, 1);
#ifdef WEXT_USECHANNELS ch = (int)status_rid.channel;
fwrq->m = ((int)status_rid.channel) + 1; if((ch > 0) && (ch < 15)) {
fwrq->e = 0; fwrq->m = frequency_list[ch - 1] * 100000;
#else
{
int f = (int)status_rid.channel;
fwrq->m = frequency_list[f] * 100000;
fwrq->e = 1; fwrq->e = 1;
} else {
fwrq->m = ch;
fwrq->e = 0;
} }
#endif
return 0; return 0;
} }
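[Editor's note] The m/e pair filled in above follows the standard Wireless Extensions encoding freq = m * 10^e, so the value returned is the channel's centre frequency in Hz rather than a bare channel number. A worked example, assuming channel 6 and the usual 2.4 GHz channel table in frequency_list:

    /*
     * ch = 6  ->  frequency_list[5] == 2437   (MHz table entry)
     * fwrq->m = 2437 * 100000 = 243700000
     * fwrq->e = 1
     * => 243700000 * 10^1 Hz = 2.437 GHz
     */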
...@@ -5783,7 +5782,7 @@ static int airo_get_essid(struct net_device *dev, ...@@ -5783,7 +5782,7 @@ static int airo_get_essid(struct net_device *dev,
/* If none, we may want to get the one that was set */ /* If none, we may want to get the one that was set */
/* Push it out ! */ /* Push it out ! */
dwrq->length = status_rid.SSIDlen + 1; dwrq->length = status_rid.SSIDlen;
dwrq->flags = 1; /* active */ dwrq->flags = 1; /* active */
return 0; return 0;
......
...@@ -1718,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev, ...@@ -1718,11 +1718,11 @@ static int atmel_get_essid(struct net_device *dev,
if (priv->new_SSID_size != 0) { if (priv->new_SSID_size != 0) {
memcpy(extra, priv->new_SSID, priv->new_SSID_size); memcpy(extra, priv->new_SSID, priv->new_SSID_size);
extra[priv->new_SSID_size] = '\0'; extra[priv->new_SSID_size] = '\0';
dwrq->length = priv->new_SSID_size + 1; dwrq->length = priv->new_SSID_size;
} else { } else {
memcpy(extra, priv->SSID, priv->SSID_size); memcpy(extra, priv->SSID, priv->SSID_size);
extra[priv->SSID_size] = '\0'; extra[priv->SSID_size] = '\0';
dwrq->length = priv->SSID_size + 1; dwrq->length = priv->SSID_size;
} }
dwrq->flags = !priv->connect_to_any_BSS; /* active */ dwrq->flags = !priv->connect_to_any_BSS; /* active */
......
...@@ -26,11 +26,25 @@ config HOSTAP_FIRMWARE ...@@ -26,11 +26,25 @@ config HOSTAP_FIRMWARE
depends on HOSTAP depends on HOSTAP
---help--- ---help---
Configure Host AP driver to include support for firmware image Configure Host AP driver to include support for firmware image
download. Current version supports only downloading to volatile, i.e., download. This option by itself only enables downloading to the
RAM memory. Flash upgrade is not yet supported. volatile memory, i.e. the card RAM. This option is required to
support cards that don't have firmware in flash, such as D-Link
DWL-520 rev E and D-Link DWL-650 rev P.
Firmware image downloading needs user space tool, prism2_srec. It is Firmware image downloading needs a user space tool, prism2_srec.
available from http://hostap.epitest.fi/. It is available from http://hostap.epitest.fi/.
config HOSTAP_FIRMWARE_NVRAM
bool "Support for non-volatile firmware download"
depends on HOSTAP_FIRMWARE
---help---
Allow Host AP driver to write firmware images to the non-volatile
card memory, i.e. flash memory that survives power cycling.
Enable this option if you want to be able to change card firmware
permanently.
Firmware image downloading needs a user space tool, prism2_srec.
It is available from http://hostap.epitest.fi/.
config HOSTAP_PLX config HOSTAP_PLX
tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors" tristate "Host AP driver for Prism2/2.5/3 in PLX9052 PCI adaptors"
......
hostap-y := hostap_main.o hostap-y := hostap_80211_rx.o hostap_80211_tx.o hostap_ap.o hostap_info.o \
hostap_ioctl.o hostap_main.o hostap_proc.o
obj-$(CONFIG_HOSTAP) += hostap.o obj-$(CONFIG_HOSTAP) += hostap.o
obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o obj-$(CONFIG_HOSTAP_CS) += hostap_cs.o
......
#ifndef HOSTAP_H #ifndef HOSTAP_H
#define HOSTAP_H #define HOSTAP_H
#include <linux/ethtool.h>
#include "hostap_wlan.h"
#include "hostap_ap.h"
static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484 };
#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
/* hostap.c */ /* hostap.c */
extern struct proc_dir_entry *hostap_proc; extern struct proc_dir_entry *hostap_proc;
...@@ -40,6 +49,26 @@ int prism2_update_comms_qual(struct net_device *dev); ...@@ -40,6 +49,26 @@ int prism2_update_comms_qual(struct net_device *dev);
int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype,
u8 *body, size_t bodylen); u8 *body, size_t bodylen);
int prism2_sta_deauth(local_info_t *local, u16 reason); int prism2_sta_deauth(local_info_t *local, u16 reason);
int prism2_wds_add(local_info_t *local, u8 *remote_addr,
int rtnl_locked);
int prism2_wds_del(local_info_t *local, u8 *remote_addr,
int rtnl_locked, int do_not_remove);
/* hostap_ap.c */
int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac);
void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac);
void ap_control_kickall(struct ap_data *ap);
void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
struct ieee80211_crypt_data ***crypt);
int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
struct iw_quality qual[], int buf_size,
int aplist);
int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param);
/* hostap_proc.c */ /* hostap_proc.c */
...@@ -54,4 +83,12 @@ void hostap_info_init(local_info_t *local); ...@@ -54,4 +83,12 @@ void hostap_info_init(local_info_t *local);
void hostap_info_process(local_info_t *local, struct sk_buff *skb); void hostap_info_process(local_info_t *local, struct sk_buff *skb);
/* hostap_ioctl.c */
extern const struct iw_handler_def hostap_iw_handler_def;
extern struct ethtool_ops prism2_ethtool_ops;
int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
#endif /* HOSTAP_H */ #endif /* HOSTAP_H */
#ifndef HOSTAP_80211_H #ifndef HOSTAP_80211_H
#define HOSTAP_80211_H #define HOSTAP_80211_H
#include <linux/types.h>
#include <net/ieee80211_crypt.h>
struct hostap_ieee80211_mgmt { struct hostap_ieee80211_mgmt {
u16 frame_control; u16 frame_control;
u16 duration; u16 duration;
......
#include <linux/etherdevice.h> #include <linux/etherdevice.h>
#include <net/ieee80211_crypt.h>
#include "hostap_80211.h" #include "hostap_80211.h"
#include "hostap.h" #include "hostap.h"
#include "hostap_ap.h"
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */
void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, void hostap_dump_rx_80211(const char *name, struct sk_buff *skb,
struct hostap_80211_rx_status *rx_stats) struct hostap_80211_rx_status *rx_stats)
......
#include "hostap_80211.h"
#include "hostap_common.h"
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */
void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) void hostap_dump_tx_80211(const char *name, struct sk_buff *skb)
{ {
struct ieee80211_hdr_4addr *hdr; struct ieee80211_hdr_4addr *hdr;
......
...@@ -16,6 +16,14 @@ ...@@ -16,6 +16,14 @@
* (8802.11: 5.5) * (8802.11: 5.5)
*/ */
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/random.h>
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL, static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL,
DEF_INTS }; DEF_INTS };
module_param_array(other_ap_policy, int, NULL, 0444); module_param_array(other_ap_policy, int, NULL, 0444);
...@@ -360,8 +368,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off, ...@@ -360,8 +368,7 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
} }
static int ap_control_add_mac(struct mac_restrictions *mac_restrictions, int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
u8 *mac)
{ {
struct mac_entry *entry; struct mac_entry *entry;
...@@ -380,8 +387,7 @@ static int ap_control_add_mac(struct mac_restrictions *mac_restrictions, ...@@ -380,8 +387,7 @@ static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
} }
static int ap_control_del_mac(struct mac_restrictions *mac_restrictions, int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
u8 *mac)
{ {
struct list_head *ptr; struct list_head *ptr;
struct mac_entry *entry; struct mac_entry *entry;
...@@ -433,7 +439,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions, ...@@ -433,7 +439,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
} }
static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions) void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
{ {
struct list_head *ptr, *n; struct list_head *ptr, *n;
struct mac_entry *entry; struct mac_entry *entry;
...@@ -454,8 +460,7 @@ static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions) ...@@ -454,8 +460,7 @@ static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions)
} }
static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac)
u8 *mac)
{ {
struct sta_info *sta; struct sta_info *sta;
u16 resp; u16 resp;
...@@ -486,7 +491,7 @@ static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, ...@@ -486,7 +491,7 @@ static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
static void ap_control_kickall(struct ap_data *ap) void ap_control_kickall(struct ap_data *ap)
{ {
struct list_head *ptr, *n; struct list_head *ptr, *n;
struct sta_info *sta; struct sta_info *sta;
...@@ -2321,9 +2326,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta) ...@@ -2321,9 +2326,9 @@ static void schedule_packet_send(local_info_t *local, struct sta_info *sta)
} }
static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
struct iw_quality qual[], int buf_size, struct iw_quality qual[], int buf_size,
int aplist) int aplist)
{ {
struct ap_data *ap = local->ap; struct ap_data *ap = local->ap;
struct list_head *ptr; struct list_head *ptr;
...@@ -2363,7 +2368,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], ...@@ -2363,7 +2368,7 @@ static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
/* Translate our list of Access Points & Stations to a card independant /* Translate our list of Access Points & Stations to a card independant
* format that the Wireless Tools will understand - Jean II */ * format that the Wireless Tools will understand - Jean II */
static int prism2_ap_translate_scan(struct net_device *dev, char *buffer) int prism2_ap_translate_scan(struct net_device *dev, char *buffer)
{ {
struct hostap_interface *iface; struct hostap_interface *iface;
local_info_t *local; local_info_t *local;
...@@ -2608,8 +2613,7 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap, ...@@ -2608,8 +2613,7 @@ static int prism2_hostapd_sta_clear_stats(struct ap_data *ap,
} }
static int prism2_hostapd(struct ap_data *ap, int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param)
struct prism2_hostapd_param *param)
{ {
switch (param->cmd) { switch (param->cmd) {
case PRISM2_HOSTAPD_FLUSH: case PRISM2_HOSTAPD_FLUSH:
...@@ -3207,8 +3211,8 @@ void hostap_update_rates(local_info_t *local) ...@@ -3207,8 +3211,8 @@ void hostap_update_rates(local_info_t *local)
} }
static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
struct ieee80211_crypt_data ***crypt) struct ieee80211_crypt_data ***crypt)
{ {
struct sta_info *sta; struct sta_info *sta;
......
#ifndef HOSTAP_AP_H #ifndef HOSTAP_AP_H
#define HOSTAP_AP_H #define HOSTAP_AP_H
#include "hostap_80211.h"
/* AP data structures for STAs */ /* AP data structures for STAs */
/* maximum number of frames to buffer per STA */ /* maximum number of frames to buffer per STA */
......
#ifndef HOSTAP_COMMON_H #ifndef HOSTAP_COMMON_H
#define HOSTAP_COMMON_H #define HOSTAP_COMMON_H
#include <linux/types.h>
#include <linux/if_ether.h>
#define BIT(x) (1 << (x)) #define BIT(x) (1 << (x))
#define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5] #define MAC2STR(a) (a)[0], (a)[1], (a)[2], (a)[3], (a)[4], (a)[5]
......
...@@ -21,15 +21,10 @@ ...@@ -21,15 +21,10 @@
#define PRISM2_DOWNLOAD_SUPPORT #define PRISM2_DOWNLOAD_SUPPORT
#endif #endif
#ifdef PRISM2_DOWNLOAD_SUPPORT /* Allow kernel configuration to enable non-volatile download support. */
/* Allow writing firmware images into flash, i.e., to non-volatile storage. #ifdef CONFIG_HOSTAP_FIRMWARE_NVRAM
* Before you enable this option, you should make absolutely sure that you are #define PRISM2_NON_VOLATILE_DOWNLOAD
* using prism2_srec utility that comes with THIS version of the driver! #endif
* In addition, please note that it is possible to kill your card with
* non-volatile download if you are using incorrect image. This feature has not
* been fully tested, so please be careful with it. */
/* #define PRISM2_NON_VOLATILE_DOWNLOAD */
#endif /* PRISM2_DOWNLOAD_SUPPORT */
/* Save low-level I/O for debugging. This should not be enabled in normal use. /* Save low-level I/O for debugging. This should not be enabled in normal use.
*/ */
......
/* Host AP driver Info Frame processing (part of hostap.o module) */ /* Host AP driver Info Frame processing (part of hostap.o module) */
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
/* Called only as a tasklet (software IRQ) */ /* Called only as a tasklet (software IRQ) */
static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf, static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf,
......
/* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */ /* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */
#ifdef in_atomic #include <linux/types.h>
/* Get kernel_locked() for in_atomic() */
#include <linux/smp_lock.h> #include <linux/smp_lock.h>
#endif
#include <linux/ethtool.h> #include <linux/ethtool.h>
#include <net/ieee80211_crypt.h>
#include "hostap_wlan.h"
#include "hostap.h"
#include "hostap_ap.h"
static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev) static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev)
{ {
...@@ -3910,7 +3912,7 @@ static void prism2_get_drvinfo(struct net_device *dev, ...@@ -3910,7 +3912,7 @@ static void prism2_get_drvinfo(struct net_device *dev,
local->sta_fw_ver & 0xff); local->sta_fw_ver & 0xff);
} }
static struct ethtool_ops prism2_ethtool_ops = { struct ethtool_ops prism2_ethtool_ops = {
.get_drvinfo = prism2_get_drvinfo .get_drvinfo = prism2_get_drvinfo
}; };
...@@ -3985,7 +3987,7 @@ static const iw_handler prism2_private_handler[] = ...@@ -3985,7 +3987,7 @@ static const iw_handler prism2_private_handler[] =
(iw_handler) prism2_ioctl_priv_readmif, /* 3 */ (iw_handler) prism2_ioctl_priv_readmif, /* 3 */
}; };
static const struct iw_handler_def hostap_iw_handler_def = const struct iw_handler_def hostap_iw_handler_def =
{ {
.num_standard = sizeof(prism2_handler) / sizeof(iw_handler), .num_standard = sizeof(prism2_handler) / sizeof(iw_handler),
.num_private = sizeof(prism2_private_handler) / sizeof(iw_handler), .num_private = sizeof(prism2_private_handler) / sizeof(iw_handler),
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/kmod.h> #include <linux/kmod.h>
#include <linux/rtnetlink.h> #include <linux/rtnetlink.h>
#include <linux/wireless.h> #include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <net/iw_handler.h> #include <net/iw_handler.h>
#include <net/ieee80211.h> #include <net/ieee80211.h>
#include <net/ieee80211_crypt.h> #include <net/ieee80211_crypt.h>
...@@ -47,57 +48,6 @@ MODULE_VERSION(PRISM2_VERSION); ...@@ -47,57 +48,6 @@ MODULE_VERSION(PRISM2_VERSION);
#define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */)) #define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */))
/* hostap.c */
static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
int rtnl_locked);
static int prism2_wds_del(local_info_t *local, u8 *remote_addr,
int rtnl_locked, int do_not_remove);
/* hostap_ap.c */
static int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[],
struct iw_quality qual[], int buf_size,
int aplist);
static int prism2_ap_translate_scan(struct net_device *dev, char *buffer);
static int prism2_hostapd(struct ap_data *ap,
struct prism2_hostapd_param *param);
static void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
struct ieee80211_crypt_data ***crypt);
static void ap_control_kickall(struct ap_data *ap);
#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
static int ap_control_add_mac(struct mac_restrictions *mac_restrictions,
u8 *mac);
static int ap_control_del_mac(struct mac_restrictions *mac_restrictions,
u8 *mac);
static void ap_control_flush_macs(struct mac_restrictions *mac_restrictions);
static int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev,
u8 *mac);
#endif /* !PRISM2_NO_KERNEL_IEEE80211_MGMT */
static const long freq_list[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484 };
#define FREQ_COUNT (sizeof(freq_list) / sizeof(freq_list[0]))
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */
/* FIX: these could be compiled separately and linked together to hostap.o */
#include "hostap_ap.c"
#include "hostap_info.c"
#include "hostap_ioctl.c"
#include "hostap_proc.c"
#include "hostap_80211_rx.c"
#include "hostap_80211_tx.c"
struct net_device * hostap_add_interface(struct local_info *local, struct net_device * hostap_add_interface(struct local_info *local,
int type, int rtnl_locked, int type, int rtnl_locked,
const char *prefix, const char *prefix,
...@@ -196,8 +146,8 @@ static inline int prism2_wds_special_addr(u8 *addr) ...@@ -196,8 +146,8 @@ static inline int prism2_wds_special_addr(u8 *addr)
} }
static int prism2_wds_add(local_info_t *local, u8 *remote_addr, int prism2_wds_add(local_info_t *local, u8 *remote_addr,
int rtnl_locked) int rtnl_locked)
{ {
struct net_device *dev; struct net_device *dev;
struct list_head *ptr; struct list_head *ptr;
...@@ -258,8 +208,8 @@ static int prism2_wds_add(local_info_t *local, u8 *remote_addr, ...@@ -258,8 +208,8 @@ static int prism2_wds_add(local_info_t *local, u8 *remote_addr,
} }
static int prism2_wds_del(local_info_t *local, u8 *remote_addr, int prism2_wds_del(local_info_t *local, u8 *remote_addr,
int rtnl_locked, int do_not_remove) int rtnl_locked, int do_not_remove)
{ {
unsigned long flags; unsigned long flags;
struct list_head *ptr; struct list_head *ptr;
......
/* /proc routines for Host AP driver */ /* /proc routines for Host AP driver */
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <net/ieee80211_crypt.h>
#include "hostap_wlan.h"
#include "hostap.h"
#define PROC_LIMIT (PAGE_SIZE - 80) #define PROC_LIMIT (PAGE_SIZE - 80)
......
#ifndef HOSTAP_WLAN_H #ifndef HOSTAP_WLAN_H
#define HOSTAP_WLAN_H #define HOSTAP_WLAN_H
#include <linux/wireless.h>
#include <linux/netdevice.h>
#include <net/iw_handler.h>
#include "hostap_config.h" #include "hostap_config.h"
#include "hostap_common.h" #include "hostap_common.h"
......
...@@ -5735,70 +5735,6 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev) ...@@ -5735,70 +5735,6 @@ static struct net_device_stats *ipw2100_stats(struct net_device *dev)
return &priv->ieee->stats; return &priv->ieee->stats;
} }
#if WIRELESS_EXT < 18
/* Support for wpa_supplicant before WE-18, deprecated. */
/* following definitions must match definitions in driver_ipw.c */
#define IPW2100_IOCTL_WPA_SUPPLICANT SIOCIWFIRSTPRIV+30
#define IPW2100_CMD_SET_WPA_PARAM 1
#define IPW2100_CMD_SET_WPA_IE 2
#define IPW2100_CMD_SET_ENCRYPTION 3
#define IPW2100_CMD_MLME 4
#define IPW2100_PARAM_WPA_ENABLED 1
#define IPW2100_PARAM_TKIP_COUNTERMEASURES 2
#define IPW2100_PARAM_DROP_UNENCRYPTED 3
#define IPW2100_PARAM_PRIVACY_INVOKED 4
#define IPW2100_PARAM_AUTH_ALGS 5
#define IPW2100_PARAM_IEEE_802_1X 6
#define IPW2100_MLME_STA_DEAUTH 1
#define IPW2100_MLME_STA_DISASSOC 2
#define IPW2100_CRYPT_ERR_UNKNOWN_ALG 2
#define IPW2100_CRYPT_ERR_UNKNOWN_ADDR 3
#define IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED 4
#define IPW2100_CRYPT_ERR_KEY_SET_FAILED 5
#define IPW2100_CRYPT_ERR_TX_KEY_SET_FAILED 6
#define IPW2100_CRYPT_ERR_CARD_CONF_FAILED 7
#define IPW2100_CRYPT_ALG_NAME_LEN 16
struct ipw2100_param {
u32 cmd;
u8 sta_addr[ETH_ALEN];
union {
struct {
u8 name;
u32 value;
} wpa_param;
struct {
u32 len;
u8 reserved[32];
u8 data[0];
} wpa_ie;
struct {
u32 command;
u32 reason_code;
} mlme;
struct {
u8 alg[IPW2100_CRYPT_ALG_NAME_LEN];
u8 set_tx;
u32 err;
u8 idx;
u8 seq[8]; /* sequence counter (set: RX, get: TX) */
u16 key_len;
u8 key[0];
} crypt;
} u;
};
/* end of driver_ipw.c code */
#endif /* WIRELESS_EXT < 18 */
static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
{ {
/* This is called when wpa_supplicant loads and closes the driver /* This is called when wpa_supplicant loads and closes the driver
...@@ -5807,11 +5743,6 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) ...@@ -5807,11 +5743,6 @@ static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value)
return 0; return 0;
} }
#if WIRELESS_EXT < 18
#define IW_AUTH_ALG_OPEN_SYSTEM 0x1
#define IW_AUTH_ALG_SHARED_KEY 0x2
#endif
static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value)
{ {
...@@ -5855,360 +5786,6 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv, ...@@ -5855,360 +5786,6 @@ void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv,
ipw2100_set_wpa_ie(priv, &frame, 0); ipw2100_set_wpa_ie(priv, &frame, 0);
} }
#if WIRELESS_EXT < 18
static int ipw2100_wpa_set_param(struct net_device *dev, u8 name, u32 value)
{
struct ipw2100_priv *priv = ieee80211_priv(dev);
struct ieee80211_crypt_data *crypt;
unsigned long flags;
int ret = 0;
switch (name) {
case IPW2100_PARAM_WPA_ENABLED:
ret = ipw2100_wpa_enable(priv, value);
break;
case IPW2100_PARAM_TKIP_COUNTERMEASURES:
crypt = priv->ieee->crypt[priv->ieee->tx_keyidx];
if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags)
break;
flags = crypt->ops->get_flags(crypt->priv);
if (value)
flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
else
flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES;
crypt->ops->set_flags(flags, crypt->priv);
break;
case IPW2100_PARAM_DROP_UNENCRYPTED:{
/* See IW_AUTH_DROP_UNENCRYPTED handling for details */
struct ieee80211_security sec = {
.flags = SEC_ENABLED,
.enabled = value,
};
priv->ieee->drop_unencrypted = value;
/* We only change SEC_LEVEL for open mode. Others
* are set by ipw_wpa_set_encryption.
*/
if (!value) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_0;
} else {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
}
if (priv->ieee->set_security)
priv->ieee->set_security(priv->ieee->dev, &sec);
break;
}
case IPW2100_PARAM_PRIVACY_INVOKED:
priv->ieee->privacy_invoked = value;
break;
case IPW2100_PARAM_AUTH_ALGS:
ret = ipw2100_wpa_set_auth_algs(priv, value);
break;
case IPW2100_PARAM_IEEE_802_1X:
priv->ieee->ieee802_1x = value;
break;
default:
printk(KERN_ERR DRV_NAME ": %s: Unknown WPA param: %d\n",
dev->name, name);
ret = -EOPNOTSUPP;
}
return ret;
}
static int ipw2100_wpa_mlme(struct net_device *dev, int command, int reason)
{
struct ipw2100_priv *priv = ieee80211_priv(dev);
int ret = 0;
switch (command) {
case IPW2100_MLME_STA_DEAUTH:
// silently ignore
break;
case IPW2100_MLME_STA_DISASSOC:
ipw2100_disassociate_bssid(priv);
break;
default:
printk(KERN_ERR DRV_NAME ": %s: Unknown MLME request: %d\n",
dev->name, command);
ret = -EOPNOTSUPP;
}
return ret;
}
static int ipw2100_wpa_set_wpa_ie(struct net_device *dev,
struct ipw2100_param *param, int plen)
{
struct ipw2100_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee;
u8 *buf;
if (!ieee->wpa_enabled)
return -EOPNOTSUPP;
if (param->u.wpa_ie.len > MAX_WPA_IE_LEN ||
(param->u.wpa_ie.len && param->u.wpa_ie.data == NULL))
return -EINVAL;
if (param->u.wpa_ie.len) {
buf = kmalloc(param->u.wpa_ie.len, GFP_KERNEL);
if (buf == NULL)
return -ENOMEM;
memcpy(buf, param->u.wpa_ie.data, param->u.wpa_ie.len);
kfree(ieee->wpa_ie);
ieee->wpa_ie = buf;
ieee->wpa_ie_len = param->u.wpa_ie.len;
} else {
kfree(ieee->wpa_ie);
ieee->wpa_ie = NULL;
ieee->wpa_ie_len = 0;
}
ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len);
return 0;
}
/* implementation borrowed from hostap driver */
static int ipw2100_wpa_set_encryption(struct net_device *dev,
struct ipw2100_param *param,
int param_len)
{
int ret = 0;
struct ipw2100_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee;
struct ieee80211_crypto_ops *ops;
struct ieee80211_crypt_data **crypt;
struct ieee80211_security sec = {
.flags = 0,
};
param->u.crypt.err = 0;
param->u.crypt.alg[IPW2100_CRYPT_ALG_NAME_LEN - 1] = '\0';
if (param_len !=
(int)((char *)param->u.crypt.key - (char *)param) +
param->u.crypt.key_len) {
IPW_DEBUG_INFO("Len mismatch %d, %d\n", param_len,
param->u.crypt.key_len);
return -EINVAL;
}
if (param->sta_addr[0] == 0xff && param->sta_addr[1] == 0xff &&
param->sta_addr[2] == 0xff && param->sta_addr[3] == 0xff &&
param->sta_addr[4] == 0xff && param->sta_addr[5] == 0xff) {
if (param->u.crypt.idx >= WEP_KEYS)
return -EINVAL;
crypt = &ieee->crypt[param->u.crypt.idx];
} else {
return -EINVAL;
}
sec.flags |= SEC_ENABLED | SEC_ENCRYPT;
if (strcmp(param->u.crypt.alg, "none") == 0) {
if (crypt) {
sec.enabled = 0;
sec.encrypt = 0;
sec.level = SEC_LEVEL_0;
sec.flags |= SEC_LEVEL;
ieee80211_crypt_delayed_deinit(ieee, crypt);
}
goto done;
}
sec.enabled = 1;
sec.encrypt = 1;
ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) {
request_module("ieee80211_crypt_wep");
ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
} else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) {
request_module("ieee80211_crypt_tkip");
ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
} else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) {
request_module("ieee80211_crypt_ccmp");
ops = ieee80211_get_crypto_ops(param->u.crypt.alg);
}
if (ops == NULL) {
IPW_DEBUG_INFO("%s: unknown crypto alg '%s'\n",
dev->name, param->u.crypt.alg);
param->u.crypt.err = IPW2100_CRYPT_ERR_UNKNOWN_ALG;
ret = -EINVAL;
goto done;
}
if (*crypt == NULL || (*crypt)->ops != ops) {
struct ieee80211_crypt_data *new_crypt;
ieee80211_crypt_delayed_deinit(ieee, crypt);
new_crypt = kzalloc(sizeof(struct ieee80211_crypt_data), GFP_KERNEL);
if (new_crypt == NULL) {
ret = -ENOMEM;
goto done;
}
new_crypt->ops = ops;
if (new_crypt->ops && try_module_get(new_crypt->ops->owner))
new_crypt->priv =
new_crypt->ops->init(param->u.crypt.idx);
if (new_crypt->priv == NULL) {
kfree(new_crypt);
param->u.crypt.err =
IPW2100_CRYPT_ERR_CRYPT_INIT_FAILED;
ret = -EINVAL;
goto done;
}
*crypt = new_crypt;
}
if (param->u.crypt.key_len > 0 && (*crypt)->ops->set_key &&
(*crypt)->ops->set_key(param->u.crypt.key,
param->u.crypt.key_len, param->u.crypt.seq,
(*crypt)->priv) < 0) {
IPW_DEBUG_INFO("%s: key setting failed\n", dev->name);
param->u.crypt.err = IPW2100_CRYPT_ERR_KEY_SET_FAILED;
ret = -EINVAL;
goto done;
}
if (param->u.crypt.set_tx) {
ieee->tx_keyidx = param->u.crypt.idx;
sec.active_key = param->u.crypt.idx;
sec.flags |= SEC_ACTIVE_KEY;
}
if (ops->name != NULL) {
if (strcmp(ops->name, "WEP") == 0) {
memcpy(sec.keys[param->u.crypt.idx],
param->u.crypt.key, param->u.crypt.key_len);
sec.key_sizes[param->u.crypt.idx] =
param->u.crypt.key_len;
sec.flags |= (1 << param->u.crypt.idx);
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_1;
} else if (strcmp(ops->name, "TKIP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_2;
} else if (strcmp(ops->name, "CCMP") == 0) {
sec.flags |= SEC_LEVEL;
sec.level = SEC_LEVEL_3;
}
}
done:
if (ieee->set_security)
ieee->set_security(ieee->dev, &sec);
/* Do not reset port if card is in Managed mode since resetting will
* generate new IEEE 802.11 authentication which may end up in looping
* with IEEE 802.1X. If your hardware requires a reset after WEP
* configuration (for example... Prism2), implement the reset_port in
* the callbacks structures used to initialize the 802.11 stack. */
if (ieee->reset_on_keychange &&
ieee->iw_mode != IW_MODE_INFRA &&
ieee->reset_port && ieee->reset_port(dev)) {
IPW_DEBUG_INFO("%s: reset_port failed\n", dev->name);
param->u.crypt.err = IPW2100_CRYPT_ERR_CARD_CONF_FAILED;
return -EINVAL;
}
return ret;
}
static int ipw2100_wpa_supplicant(struct net_device *dev, struct iw_point *p)
{
struct ipw2100_param *param;
int ret = 0;
IPW_DEBUG_IOCTL("wpa_supplicant: len=%d\n", p->length);
if (p->length < sizeof(struct ipw2100_param) || !p->pointer)
return -EINVAL;
param = (struct ipw2100_param *)kmalloc(p->length, GFP_KERNEL);
if (param == NULL)
return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
kfree(param);
return -EFAULT;
}
switch (param->cmd) {
case IPW2100_CMD_SET_WPA_PARAM:
ret = ipw2100_wpa_set_param(dev, param->u.wpa_param.name,
param->u.wpa_param.value);
break;
case IPW2100_CMD_SET_WPA_IE:
ret = ipw2100_wpa_set_wpa_ie(dev, param, p->length);
break;
case IPW2100_CMD_SET_ENCRYPTION:
ret = ipw2100_wpa_set_encryption(dev, param, p->length);
break;
case IPW2100_CMD_MLME:
ret = ipw2100_wpa_mlme(dev, param->u.mlme.command,
param->u.mlme.reason_code);
break;
default:
printk(KERN_ERR DRV_NAME
": %s: Unknown WPA supplicant request: %d\n", dev->name,
param->cmd);
ret = -EOPNOTSUPP;
}
if (ret == 0 && copy_to_user(p->pointer, param, p->length))
ret = -EFAULT;
kfree(param);
return ret;
}
static int ipw2100_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct iwreq *wrq = (struct iwreq *)rq;
int ret = -1;
switch (cmd) {
case IPW2100_IOCTL_WPA_SUPPLICANT:
ret = ipw2100_wpa_supplicant(dev, &wrq->u.data);
return ret;
default:
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
#endif /* WIRELESS_EXT < 18 */
static void ipw_ethtool_get_drvinfo(struct net_device *dev, static void ipw_ethtool_get_drvinfo(struct net_device *dev,
struct ethtool_drvinfo *info) struct ethtool_drvinfo *info)
{ {
...@@ -6337,9 +5914,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, ...@@ -6337,9 +5914,6 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
dev->open = ipw2100_open; dev->open = ipw2100_open;
dev->stop = ipw2100_close; dev->stop = ipw2100_close;
dev->init = ipw2100_net_init; dev->init = ipw2100_net_init;
#if WIRELESS_EXT < 18
dev->do_ioctl = ipw2100_ioctl;
#endif
dev->get_stats = ipw2100_stats; dev->get_stats = ipw2100_stats;
dev->ethtool_ops = &ipw2100_ethtool_ops; dev->ethtool_ops = &ipw2100_ethtool_ops;
dev->tx_timeout = ipw2100_tx_timeout; dev->tx_timeout = ipw2100_tx_timeout;
...@@ -7855,7 +7429,6 @@ static int ipw2100_wx_get_power(struct net_device *dev, ...@@ -7855,7 +7429,6 @@ static int ipw2100_wx_get_power(struct net_device *dev,
return 0; return 0;
} }
#if WIRELESS_EXT > 17
/* /*
* WE-18 WPA support * WE-18 WPA support
*/ */
...@@ -8117,7 +7690,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev, ...@@ -8117,7 +7690,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev,
} }
return 0; return 0;
} }
#endif /* WIRELESS_EXT > 17 */
/* /*
* *
...@@ -8350,11 +7922,7 @@ static iw_handler ipw2100_wx_handlers[] = { ...@@ -8350,11 +7922,7 @@ static iw_handler ipw2100_wx_handlers[] = {
NULL, /* SIOCWIWTHRSPY */ NULL, /* SIOCWIWTHRSPY */
ipw2100_wx_set_wap, /* SIOCSIWAP */ ipw2100_wx_set_wap, /* SIOCSIWAP */
ipw2100_wx_get_wap, /* SIOCGIWAP */ ipw2100_wx_get_wap, /* SIOCGIWAP */
#if WIRELESS_EXT > 17
ipw2100_wx_set_mlme, /* SIOCSIWMLME */ ipw2100_wx_set_mlme, /* SIOCSIWMLME */
#else
NULL, /* -- hole -- */
#endif
NULL, /* SIOCGIWAPLIST -- deprecated */ NULL, /* SIOCGIWAPLIST -- deprecated */
ipw2100_wx_set_scan, /* SIOCSIWSCAN */ ipw2100_wx_set_scan, /* SIOCSIWSCAN */
ipw2100_wx_get_scan, /* SIOCGIWSCAN */ ipw2100_wx_get_scan, /* SIOCGIWSCAN */
...@@ -8378,7 +7946,6 @@ static iw_handler ipw2100_wx_handlers[] = { ...@@ -8378,7 +7946,6 @@ static iw_handler ipw2100_wx_handlers[] = {
ipw2100_wx_get_encode, /* SIOCGIWENCODE */ ipw2100_wx_get_encode, /* SIOCGIWENCODE */
ipw2100_wx_set_power, /* SIOCSIWPOWER */ ipw2100_wx_set_power, /* SIOCSIWPOWER */
ipw2100_wx_get_power, /* SIOCGIWPOWER */ ipw2100_wx_get_power, /* SIOCGIWPOWER */
#if WIRELESS_EXT > 17
NULL, /* -- hole -- */ NULL, /* -- hole -- */
NULL, /* -- hole -- */ NULL, /* -- hole -- */
ipw2100_wx_set_genie, /* SIOCSIWGENIE */ ipw2100_wx_set_genie, /* SIOCSIWGENIE */
...@@ -8388,7 +7955,6 @@ static iw_handler ipw2100_wx_handlers[] = { ...@@ -8388,7 +7955,6 @@ static iw_handler ipw2100_wx_handlers[] = {
ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */ ipw2100_wx_set_encodeext, /* SIOCSIWENCODEEXT */
ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */ ipw2100_wx_get_encodeext, /* SIOCGIWENCODEEXT */
NULL, /* SIOCSIWPMKSA */ NULL, /* SIOCSIWPMKSA */
#endif
}; };
#define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV #define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV
......
...@@ -8936,14 +8936,12 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid, ...@@ -8936,14 +8936,12 @@ static int ipw_request_direct_scan(struct ipw_priv *priv, char *essid,
IPW_DEBUG_HC("starting request direct scan!\n"); IPW_DEBUG_HC("starting request direct scan!\n");
if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) { if (priv->status & (STATUS_SCANNING | STATUS_SCAN_ABORTING)) {
err = wait_event_interruptible(priv->wait_state, /* We should not sleep here; otherwise we will block most
!(priv-> * of the system (for instance, we hold rtnl_lock when we
status & (STATUS_SCANNING | * get here).
STATUS_SCAN_ABORTING))); */
if (err) { err = -EAGAIN;
IPW_DEBUG_HC("aborting direct scan"); goto done;
goto done;
}
} }
memset(&scan, 0, sizeof(scan)); memset(&scan, 0, sizeof(scan));
......
...@@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info, ...@@ -748,7 +748,7 @@ prism54_get_essid(struct net_device *ndev, struct iw_request_info *info,
if (essid->length) { if (essid->length) {
dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */ dwrq->flags = 1; /* set ESSID to ON for Wireless Extensions */
/* if it is to big, trunk it */ /* if it is to big, trunk it */
dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length + 1); dwrq->length = min(IW_ESSID_MAX_SIZE, essid->length);
} else { } else {
dwrq->flags = 0; dwrq->flags = 0;
dwrq->length = 0; dwrq->length = 0;
......
...@@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev) ...@@ -177,7 +177,7 @@ islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
#endif #endif
newskb->dev = skb->dev; newskb->dev = skb->dev;
dev_kfree_skb(skb); dev_kfree_skb_irq(skb);
skb = newskb; skb = newskb;
} }
} }
......
...@@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev, ...@@ -1256,7 +1256,7 @@ static int ray_get_essid(struct net_device *dev,
extra[IW_ESSID_MAX_SIZE] = '\0'; extra[IW_ESSID_MAX_SIZE] = '\0';
/* Push it out ! */ /* Push it out ! */
dwrq->length = strlen(extra) + 1; dwrq->length = strlen(extra);
dwrq->flags = 1; /* active */ dwrq->flags = 1; /* active */
return 0; return 0;
......
...@@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev, ...@@ -2280,7 +2280,7 @@ static int wavelan_get_essid(struct net_device *dev,
extra[IW_ESSID_MAX_SIZE] = '\0'; extra[IW_ESSID_MAX_SIZE] = '\0';
/* Set the length */ /* Set the length */
wrqu->data.length = strlen(extra) + 1; wrqu->data.length = strlen(extra);
return 0; return 0;
} }
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/types.h> #include <linux/types.h>
#include <linux/list.h> #include <linux/list.h>
#include <net/ieee80211.h>
#include <asm/atomic.h> #include <asm/atomic.h>
enum { enum {
......
...@@ -327,7 +327,7 @@ struct iw_handler_def ...@@ -327,7 +327,7 @@ struct iw_handler_def
__u16 num_private_args; __u16 num_private_args;
/* Array of handlers for standard ioctls /* Array of handlers for standard ioctls
* We will call dev->wireless_handlers->standard[ioctl - SIOCSIWNAME] * We will call dev->wireless_handlers->standard[ioctl - SIOCSIWCOMMIT]
*/ */
const iw_handler * standard; const iw_handler * standard;
......