Commit 60ca9758 authored by Linus Torvalds

Merge branch 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6

* 'upstream-linus' of master.kernel.org:/pub/scm/linux/kernel/git/jgarzik/netdev-2.6: (42 commits)
  [PATCH] Fix section mismatch in de2104x.c
  [PATCH] sky2: set lower pause threshold to prevent overrun
  [PATCH] sky2: revert pci express extensions
  [PATCH] skge: version 1.9
  [PATCH] skge: better flow control negotiation
  [PATCH] skge: pause mapping for fiber
  [PATCH] skge: fix stuck irq when fiber down
  [PATCH] powerpc/cell spidernet release all descrs
  [PATCH] powerpc/cell spidernet DMA direction fix
  [PATCH] powerpc/cell spidernet variable name change
  [PATCH] powerpc/cell spidernet reduce DMA kicking
  [PATCH] powerpc/cell spidernet
  [PATCH] powerpc/cell spidernet refine locking
  [PATCH] powerpc/cell spidernet NAPI polling info.
  [PATCH] powerpc/cell spidernet low watermark patch.
  [PATCH] powerpc/cell spidernet incorrect offset
  [PATCH] powerpc/cell spidernet stop error printing patch.
  [PATCH] powerpc/cell spidernet fix error interrupt print
  [PATCH] powerpc/cell spidernet bogus rx interrupt bit
  [PATCH] Spidernet stop queue when queue is full.
  ...
@@ -1706,14 +1706,15 @@ static void __b44_set_rx_mode(struct net_device *dev)
 		__b44_set_mac_addr(bp);
 
-		if (dev->flags & IFF_ALLMULTI)
+		if ((dev->flags & IFF_ALLMULTI) ||
+		    (dev->mc_count > B44_MCAST_TABLE_SIZE))
 			val |= RXCONFIG_ALLMULTI;
 		else
 			i = __b44_load_mcast(bp, dev);
 
-		for (; i < 64; i++) {
+		for (; i < 64; i++)
 			__b44_cam_write(bp, zero, i);
-		}
+
 		bw32(bp, B44_RXCONFIG, val);
 		val = br32(bp, B44_CAM_CTRL);
 		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
@@ -2055,7 +2056,7 @@ static int b44_read_eeprom(struct b44 *bp, u8 *data)
 	u16 *ptr = (u16 *) data;
 
 	for (i = 0; i < 128; i += 2)
-		ptr[i / 2] = readw(bp->regs + 4096 + i);
+		ptr[i / 2] = cpu_to_le16(readw(bp->regs + 4096 + i));
 
 	return 0;
 }
...
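The b44_read_eeprom() hunk stores each 16-bit EEPROM word through cpu_to_le16(), so the byte image built in `data` is identical on little- and big-endian hosts. A minimal userspace sketch of the same normalization (illustrative names, not from the driver):

```c
#include <stdint.h>
#include <stdio.h>

/* Store a CPU-order 16-bit word as little-endian bytes -- the same
 * normalization cpu_to_le16() performs in the kernel. */
static void put_le16(uint8_t *buf, uint16_t v)
{
	buf[0] = v & 0xff;        /* low byte first */
	buf[1] = (v >> 8) & 0xff; /* then high byte */
}

int main(void)
{
	uint16_t words[4] = { 0x1234, 0xabcd, 0x0001, 0xff00 }; /* fake EEPROM words */
	uint8_t eeprom[8];

	for (int i = 0; i < 4; i++)
		put_le16(eeprom + 2 * i, words[i]);

	/* The byte image is now the same regardless of host endianness. */
	for (int i = 0; i < 8; i++)
		printf("%02x ", eeprom[i]);
	printf("\n");
	return 0;
}
```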
@@ -1433,7 +1433,7 @@ void bond_alb_monitor(struct bonding *bond)
 		 * write lock to protect from other code that also
 		 * sets the promiscuity.
 		 */
-		write_lock(&bond->curr_slave_lock);
+		write_lock_bh(&bond->curr_slave_lock);
 
 		if (bond_info->primary_is_promisc &&
 		    (++bond_info->rlb_promisc_timeout_counter >= RLB_PROMISC_TIMEOUT)) {
@@ -1448,7 +1448,7 @@ void bond_alb_monitor(struct bonding *bond)
 			bond_info->primary_is_promisc = 0;
 		}
 
-		write_unlock(&bond->curr_slave_lock);
+		write_unlock_bh(&bond->curr_slave_lock);
 
 		if (bond_info->rlb_rebalance) {
 			bond_info->rlb_rebalance = 0;
...
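The bond_alb_monitor() change switches to the _bh lock variants: curr_slave_lock is also taken from softirq context, so a plain write_lock() held in timer context can deadlock if a softirq fires on the same CPU while the lock is held. A hedged sketch of the pattern, not the bonding code itself:

```c
#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock); /* stands in for bond->curr_slave_lock */

/* Process/timer-context writer: must block bottom halves, because a
 * softirq on this CPU could otherwise spin forever on a lock that
 * this (preempted) context already holds. */
static void writer_process_context(void)
{
	write_lock_bh(&example_lock);
	/* ... update state that softirq-context code also touches ... */
	write_unlock_bh(&example_lock);
}
```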
@@ -39,7 +39,7 @@
 #include <asm/io.h>
 
 #define DRV_NAME	"ehea"
-#define DRV_VERSION	"EHEA_0028"
+#define DRV_VERSION	"EHEA_0034"
 
 #define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
 	| NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -50,6 +50,7 @@
 #define EHEA_MAX_ENTRIES_SQ   32767
 #define EHEA_MIN_ENTRIES_QP   127
 
+#define EHEA_SMALL_QUEUES
 #define EHEA_NUM_TX_QP 1
 
 #ifdef EHEA_SMALL_QUEUES
@@ -59,11 +60,11 @@
 #define EHEA_DEF_ENTRIES_RQ2  1023
 #define EHEA_DEF_ENTRIES_RQ3  1023
 #else
-#define EHEA_MAX_CQE_COUNT    32000
-#define EHEA_DEF_ENTRIES_SQ   16000
-#define EHEA_DEF_ENTRIES_RQ1  32080
-#define EHEA_DEF_ENTRIES_RQ2   4020
-#define EHEA_DEF_ENTRIES_RQ3   4020
+#define EHEA_MAX_CQE_COUNT     4080
+#define EHEA_DEF_ENTRIES_SQ    4080
+#define EHEA_DEF_ENTRIES_RQ1   8160
+#define EHEA_DEF_ENTRIES_RQ2   2040
+#define EHEA_DEF_ENTRIES_RQ3   2040
 #endif
 
 #define EHEA_MAX_ENTRIES_EQ 20
...
@@ -766,7 +766,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
 			if (!netif_carrier_ok(port->netdev)) {
 				ret = ehea_sense_port_attr(
-					adapter->port[portnum]);
+					port);
 				if (ret) {
 					ehea_error("failed resensing port "
 						   "attributes");
@@ -818,7 +818,7 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
 		netif_stop_queue(port->netdev);
 		break;
 	default:
-		ehea_error("unknown event code %x", ec);
+		ehea_error("unknown event code %x, eqe=0x%lX", ec, eqe);
 		break;
 	}
 }
@@ -1841,7 +1841,7 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (netif_msg_tx_queued(port)) {
 		ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr);
-		ehea_dump(swqe, sizeof(*swqe), "swqe");
+		ehea_dump(swqe, 512, "swqe");
 	}
 
 	ehea_post_swqe(pr->qp, swqe);
...
(This file's diff is collapsed.)
@@ -2497,6 +2497,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
 
@@ -2508,16 +2509,16 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 		if (!(events & np->irqmask))
 			break;
 
-		spin_lock_irq(&np->lock);
+		spin_lock_irqsave(&np->lock, flags);
 		nv_tx_done(dev);
-		spin_unlock_irq(&np->lock);
+		spin_unlock_irqrestore(&np->lock, flags);
 
 		if (events & (NVREG_IRQ_TX_ERR)) {
 			dprintk(KERN_DEBUG "%s: received irq with events 0x%x. Probably TX fail.\n",
 				dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_TX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2527,7 +2528,7 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 
@@ -2601,6 +2602,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
 
@@ -2614,14 +2616,14 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 		nv_rx_process(dev, dev->weight);
 		if (nv_alloc_rx(dev)) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			if (!np->in_shutdown)
 				mod_timer(&np->oom_kick, jiffies + OOM_REFILL);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 		}
 
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_RX_ALL, base + NvRegIrqMask);
 			pci_push(base);
@@ -2631,7 +2633,7 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
 	}
@@ -2648,6 +2650,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 	u8 __iomem *base = get_hwbase(dev);
 	u32 events;
 	int i;
+	unsigned long flags;
 
 	dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
 
@@ -2660,14 +2663,14 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 			break;
 
 		if (events & NVREG_IRQ_LINK) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			nv_link_irq(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 		}
 		if (np->need_linktimer && time_after(jiffies, np->link_timeout)) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			nv_linkchange(dev);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			np->link_timeout = jiffies + LINK_TIMEOUT;
 		}
 		if (events & (NVREG_IRQ_UNKNOWN)) {
@@ -2675,7 +2678,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 				dev->name, events);
 		}
 		if (i > max_interrupt_work) {
-			spin_lock_irq(&np->lock);
+			spin_lock_irqsave(&np->lock, flags);
 			/* disable interrupts on the nic */
 			writel(NVREG_IRQ_OTHER, base + NvRegIrqMask);
 			pci_push(base);
@@ -2685,7 +2688,7 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
 				mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
 			}
 			printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i);
-			spin_unlock_irq(&np->lock);
+			spin_unlock_irqrestore(&np->lock, flags);
 			break;
 		}
...
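All three forcedeth handlers switch from spin_lock_irq() to spin_lock_irqsave(): the unconditional variant re-enables interrupts on unlock, which is only safe when the handler is guaranteed to run with interrupts enabled; the save/restore pair preserves whatever interrupt state the caller actually had. A minimal sketch of the safe pattern:

```c
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock); /* stands in for np->lock */

static void touch_shared_state(void)
{
	unsigned long flags;

	/* Save the current IRQ state, take the lock, and restore the
	 * exact prior state afterwards -- correct whether the caller
	 * had interrupts enabled or disabled. */
	spin_lock_irqsave(&example_lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&example_lock, flags);
}
```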
@@ -213,6 +213,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		}
 
 		free_index = pool->consumer_index++ % pool->size;
+		pool->consumer_index = free_index;
 		index = pool->free_map[free_index];
 
 		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
@@ -238,7 +239,10 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
 		if(lpar_rc != H_SUCCESS) {
 			pool->free_map[free_index] = index;
 			pool->skbuff[index] = NULL;
-			pool->consumer_index--;
+			if (pool->consumer_index == 0)
+				pool->consumer_index = pool->size - 1;
+			else
+				pool->consumer_index--;
 			dma_unmap_single(&adapter->vdev->dev,
 					pool->dma_addr[index], pool->buff_size,
 					DMA_FROM_DEVICE);
@@ -326,6 +330,7 @@ static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64
 			DMA_FROM_DEVICE);
 
 	free_index = adapter->rx_buff_pool[pool].producer_index++ % adapter->rx_buff_pool[pool].size;
+	adapter->rx_buff_pool[pool].producer_index = free_index;
 	adapter->rx_buff_pool[pool].free_map[free_index] = index;
 
 	mb();
@@ -437,6 +442,31 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 				&adapter->rx_buff_pool[i]);
 }
 
+static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
+		union ibmveth_buf_desc rxq_desc, u64 mac_address)
+{
+	int rc, try_again = 1;
+
+	/* After a kexec the adapter will still be open, so our attempt to
+	 * open it will fail. So if we get a failure we free the adapter and
+	 * try again, but only once. */
+retry:
+	rc = h_register_logical_lan(adapter->vdev->unit_address,
+				    adapter->buffer_list_dma, rxq_desc.desc,
+				    adapter->filter_list_dma, mac_address);
+
+	if (rc != H_SUCCESS && try_again) {
+		do {
+			rc = h_free_logical_lan(adapter->vdev->unit_address);
+		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
+
+		try_again = 0;
+		goto retry;
+	}
+
+	return rc;
+}
+
 static int ibmveth_open(struct net_device *netdev)
 {
 	struct ibmveth_adapter *adapter = netdev->priv;
@@ -502,12 +532,9 @@ static int ibmveth_open(struct net_device *netdev)
 	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
 	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);
 
+	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
 
-	lpar_rc = h_register_logical_lan(adapter->vdev->unit_address,
-					 adapter->buffer_list_dma,
-					 rxq_desc.desc,
-					 adapter->filter_list_dma,
-					 mac_address);
+	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
 
 	if(lpar_rc != H_SUCCESS) {
 		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
@@ -905,6 +932,14 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
 	return -EINVAL;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void ibmveth_poll_controller(struct net_device *dev)
+{
+	ibmveth_replenish_task(dev->priv);
+	ibmveth_interrupt(dev->irq, dev);
+}
+#endif
+
 static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
 {
 	int rc, i;
@@ -977,6 +1012,9 @@ static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_
 	netdev->ethtool_ops = &netdev_ethtool_ops;
 	netdev->change_mtu = ibmveth_change_mtu;
 	SET_NETDEV_DEV(netdev, &dev->dev);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = ibmveth_poll_controller;
+#endif
 	netdev->features |= NETIF_F_LLTX;
 	spin_lock_init(&adapter->stats_lock);
 
@@ -1132,7 +1170,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 {
 	struct proc_dir_entry *entry;
 	if (ibmveth_proc_dir) {
-		entry = create_proc_entry(adapter->netdev->name, S_IFREG, ibmveth_proc_dir);
+		char u_addr[10];
+		sprintf(u_addr, "%x", adapter->vdev->unit_address);
+		entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir);
 		if (!entry) {
 			ibmveth_error_printk("Cannot create adapter proc entry");
 		} else {
@@ -1147,7 +1187,9 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
 static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
 {
 	if (ibmveth_proc_dir) {
-		remove_proc_entry(adapter->netdev->name, ibmveth_proc_dir);
+		char u_addr[10];
+		sprintf(u_addr, "%x", adapter->vdev->unit_address);
+		remove_proc_entry(u_addr, ibmveth_proc_dir);
 	}
 }
...
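Both ibmveth index fixes guard the same invariant: consumer_index and producer_index are used modulo pool->size, and the pool size need not be a power of two, so the stored index must be written back in reduced form and must wrap explicitly instead of decrementing past zero on the error path. A standalone sketch of that discipline (simplified, not the driver's exact bookkeeping):

```c
#include <assert.h>
#include <stddef.h>

struct ring {
	size_t consumer; /* kept in [0, size) at all times */
	size_t size;     /* need not be a power of two */
};

/* Advance and return the slot to consume, keeping the stored
 * index reduced so it can never drift or overflow. */
static size_t ring_take(struct ring *r)
{
	size_t slot = r->consumer;
	r->consumer = (r->consumer + 1) % r->size;
	return slot;
}

/* Undo one take (error path): wrap explicitly instead of
 * decrementing, which would underflow at zero. */
static void ring_untake(struct ring *r)
{
	r->consumer = (r->consumer == 0) ? r->size - 1 : r->consumer - 1;
}

int main(void)
{
	struct ring r = { .consumer = 0, .size = 3 };
	ring_take(&r);   /* consumer -> 1 */
	ring_untake(&r); /* consumer -> 0 */
	ring_untake(&r); /* wraps to 2, not (size_t)-1 */
	assert(r.consumer == 2);
	return 0;
}
```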
@@ -2155,7 +2155,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
 	for (offset = ETH_MIB_BAD_OCTETS_RECEIVED;
 			offset <= ETH_MIB_FRAMES_1024_TO_MAX_OCTETS;
 			offset += 4)
-		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
+		*(u32 *)((char *)p + offset) += read_mib(mp, offset);
 
 	p->good_octets_sent += read_mib(mp, ETH_MIB_GOOD_OCTETS_SENT_LOW);
 	p->good_octets_sent +=
@@ -2164,7 +2164,7 @@ static void eth_update_mib_counters(struct mv643xx_private *mp)
 	for (offset = ETH_MIB_GOOD_FRAMES_SENT;
 			offset <= ETH_MIB_LATE_COLLISION;
 			offset += 4)
-		*(u32 *)((char *)p + offset) = read_mib(mp, offset);
+		*(u32 *)((char *)p + offset) += read_mib(mp, offset);
 }
 
 /*
...
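The mv643xx MIB registers are clear-on-read, so each read_mib() poll returns only the delta since the previous poll; assigning with `=` silently discarded the accumulated totals, which is why both loops now use `+=`, matching the good_octets_sent lines that already accumulated. A tiny model of the bug:

```c
#include <stdint.h>

/* Stand-in for a clear-on-read hardware counter register. */
static uint32_t hw_counter;

static uint32_t read_mib_stub(void)
{
	uint32_t v = hw_counter;
	hw_counter = 0; /* hardware clears the register on read */
	return v;
}

static uint64_t sw_total; /* software counter must accumulate */

static void poll_stats(void)
{
	/* '+=' is essential: '=' would throw away everything the
	 * register counted before this poll interval. */
	sw_total += read_mib_stub();
}
```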
@@ -43,7 +43,7 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"1.8"
+#define DRV_VERSION		"1.9"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
@@ -197,8 +197,8 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
 		else if (hw->chip_id == CHIP_ID_YUKON)
 			supported &= ~SUPPORTED_1000baseT_Half;
 	} else
-		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
-			| SUPPORTED_Autoneg;
+		supported = SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half
+			| SUPPORTED_FIBRE | SUPPORTED_Autoneg;
 
 	return supported;
 }
@@ -487,31 +487,37 @@ static void skge_get_pauseparam(struct net_device *dev,
 {
 	struct skge_port *skge = netdev_priv(dev);
 
-	ecmd->tx_pause = (skge->flow_control == FLOW_MODE_LOC_SEND)
-		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);
-	ecmd->rx_pause = (skge->flow_control == FLOW_MODE_REM_SEND)
-		|| (skge->flow_control == FLOW_MODE_SYMMETRIC);
+	ecmd->rx_pause = (skge->flow_control == FLOW_MODE_SYMMETRIC)
+		|| (skge->flow_control == FLOW_MODE_SYM_OR_REM);
+	ecmd->tx_pause = ecmd->rx_pause || (skge->flow_control == FLOW_MODE_LOC_SEND);
 
-	ecmd->autoneg = skge->autoneg;
+	ecmd->autoneg = ecmd->rx_pause || ecmd->tx_pause;
 }
 
 static int skge_set_pauseparam(struct net_device *dev,
 			       struct ethtool_pauseparam *ecmd)
 {
 	struct skge_port *skge = netdev_priv(dev);
+	struct ethtool_pauseparam old;
 
-	skge->autoneg = ecmd->autoneg;
-	if (ecmd->rx_pause && ecmd->tx_pause)
-		skge->flow_control = FLOW_MODE_SYMMETRIC;
-	else if (ecmd->rx_pause && !ecmd->tx_pause)
-		skge->flow_control = FLOW_MODE_REM_SEND;
-	else if (!ecmd->rx_pause && ecmd->tx_pause)
-		skge->flow_control = FLOW_MODE_LOC_SEND;
-	else
-		skge->flow_control = FLOW_MODE_NONE;
+	skge_get_pauseparam(dev, &old);
+
+	if (ecmd->autoneg != old.autoneg)
+		skge->flow_control = ecmd->autoneg ? FLOW_MODE_NONE : FLOW_MODE_SYMMETRIC;
+	else {
+		if (ecmd->rx_pause && ecmd->tx_pause)
+			skge->flow_control = FLOW_MODE_SYMMETRIC;
+		else if (ecmd->rx_pause && !ecmd->tx_pause)
+			skge->flow_control = FLOW_MODE_SYM_OR_REM;
+		else if (!ecmd->rx_pause && ecmd->tx_pause)
+			skge->flow_control = FLOW_MODE_LOC_SEND;
+		else
+			skge->flow_control = FLOW_MODE_NONE;
+	}
 
 	if (netif_running(dev))
 		skge_phy_reset(skge);
+
 	return 0;
 }
@@ -854,6 +860,23 @@ static int skge_rx_fill(struct net_device *dev)
 	return 0;
 }
 
+static const char *skge_pause(enum pause_status status)
+{
+	switch(status) {
+	case FLOW_STAT_NONE:
+		return "none";
+	case FLOW_STAT_REM_SEND:
+		return "rx only";
+	case FLOW_STAT_LOC_SEND:
+		return "tx_only";
+	case FLOW_STAT_SYMMETRIC:		/* Both station may send PAUSE */
+		return "both";
+	default:
+		return "indeterminated";
+	}
+}
+
 static void skge_link_up(struct skge_port *skge)
 {
 	skge_write8(skge->hw, SK_REG(skge->port, LNK_LED_REG),
@@ -862,16 +885,13 @@ static void skge_link_up(struct skge_port *skge)
 	netif_carrier_on(skge->netdev);
 	netif_wake_queue(skge->netdev);
 
-	if (netif_msg_link(skge))
+	if (netif_msg_link(skge)) {
 		printk(KERN_INFO PFX
 		       "%s: Link is up at %d Mbps, %s duplex, flow control %s\n",
 		       skge->netdev->name, skge->speed,
 		       skge->duplex == DUPLEX_FULL ? "full" : "half",
-		       (skge->flow_control == FLOW_MODE_NONE) ? "none" :
-		       (skge->flow_control == FLOW_MODE_LOC_SEND) ? "tx only" :
-		       (skge->flow_control == FLOW_MODE_REM_SEND) ? "rx only" :
-		       (skge->flow_control == FLOW_MODE_SYMMETRIC) ? "tx and rx" :
-		       "unknown");
+		       skge_pause(skge->flow_status));
+	}
 }
 
 static void skge_link_down(struct skge_port *skge)
@@ -884,6 +904,29 @@ static void skge_link_down(struct skge_port *skge)
 	printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
 }
 
+static void xm_link_down(struct skge_hw *hw, int port)
+{
+	struct net_device *dev = hw->dev[port];
+	struct skge_port *skge = netdev_priv(dev);
+	u16 cmd, msk;
+
+	if (hw->phy_type == SK_PHY_XMAC) {
+		msk = xm_read16(hw, port, XM_IMSK);
+		msk |= XM_IS_INP_ASS | XM_IS_LIPA_RC | XM_IS_RX_PAGE | XM_IS_AND;
+		xm_write16(hw, port, XM_IMSK, msk);
+	}
+
+	cmd = xm_read16(hw, port, XM_MMU_CMD);
+	cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+	xm_write16(hw, port, XM_MMU_CMD, cmd);
+
+	/* dummy read to ensure writing */
+	(void) xm_read16(hw, port, XM_MMU_CMD);
+
+	if (netif_carrier_ok(dev))
+		skge_link_down(skge);
+}
+
 static int __xm_phy_read(struct skge_hw *hw, int port, u16 reg, u16 *val)
 {
 	int i;
@@ -992,7 +1035,15 @@ static const u16 phy_pause_map[] = {
 	[FLOW_MODE_NONE] =	0,
 	[FLOW_MODE_LOC_SEND] =	PHY_AN_PAUSE_ASYM,
 	[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
-	[FLOW_MODE_REM_SEND]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+	[FLOW_MODE_SYM_OR_REM] = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+};
+
+/* special defines for FIBER (88E1011S only) */
+static const u16 fiber_pause_map[] = {
+	[FLOW_MODE_NONE]	= PHY_X_P_NO_PAUSE,
+	[FLOW_MODE_LOC_SEND]	= PHY_X_P_ASYM_MD,
+	[FLOW_MODE_SYMMETRIC]	= PHY_X_P_SYM_MD,
+	[FLOW_MODE_SYM_OR_REM]	= PHY_X_P_BOTH_MD,
 };
 
@@ -1008,14 +1059,7 @@ static void bcom_check_link(struct skge_hw *hw, int port)
 	status = xm_phy_read(hw, port, PHY_BCOM_STAT);
 
 	if ((status & PHY_ST_LSYNC) == 0) {
-		u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
-		cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
-		xm_write16(hw, port, XM_MMU_CMD, cmd);
-		/* dummy read to ensure writing */
-		(void) xm_read16(hw, port, XM_MMU_CMD);
-
-		if (netif_carrier_ok(dev))
-			skge_link_down(skge);
+		xm_link_down(hw, port);
 		return;
 	}
@@ -1048,20 +1092,19 @@ static void bcom_check_link(struct skge_hw *hw, int port)
 			return;
 		}
 
 		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
 		switch (aux & PHY_B_AS_PAUSE_MSK) {
 		case PHY_B_AS_PAUSE_MSK:
-			skge->flow_control = FLOW_MODE_SYMMETRIC;
+			skge->flow_status = FLOW_STAT_SYMMETRIC;
 			break;
 		case PHY_B_AS_PRR:
-			skge->flow_control = FLOW_MODE_REM_SEND;
+			skge->flow_status = FLOW_STAT_REM_SEND;
 			break;
 		case PHY_B_AS_PRT:
-			skge->flow_control = FLOW_MODE_LOC_SEND;
+			skge->flow_status = FLOW_STAT_LOC_SEND;
 			break;
 		default:
-			skge->flow_control = FLOW_MODE_NONE;
+			skge->flow_status = FLOW_STAT_NONE;
 		}
 		skge->speed = SPEED_1000;
 	}
@@ -1191,17 +1234,7 @@ static void xm_phy_init(struct skge_port *skge)
 		if (skge->advertising & ADVERTISED_1000baseT_Full)
 			ctrl |= PHY_X_AN_FD;
 
-		switch(skge->flow_control) {
-		case FLOW_MODE_NONE:
-			ctrl |= PHY_X_P_NO_PAUSE;
-			break;
-		case FLOW_MODE_LOC_SEND:
-			ctrl |= PHY_X_P_ASYM_MD;
-			break;
-		case FLOW_MODE_SYMMETRIC:
-			ctrl |= PHY_X_P_BOTH_MD;
-			break;
-		}
+		ctrl |= fiber_pause_map[skge->flow_control];
 
 		xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl);
@@ -1235,14 +1268,7 @@ static void xm_check_link(struct net_device *dev)
 	status = xm_phy_read(hw, port, PHY_XMAC_STAT);
 
 	if ((status & PHY_ST_LSYNC) == 0) {
-		u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
-		cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
-		xm_write16(hw, port, XM_MMU_CMD, cmd);
-		/* dummy read to ensure writing */
-		(void) xm_read16(hw, port, XM_MMU_CMD);
-
-		if (netif_carrier_ok(dev))
-			skge_link_down(skge);
+		xm_link_down(hw, port);
 		return;
 	}
@@ -1276,15 +1302,20 @@ static void xm_check_link(struct net_device *dev)
 		}
 
 		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
-		if (lpa & PHY_X_P_SYM_MD)
-			skge->flow_control = FLOW_MODE_SYMMETRIC;
-		else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
-			skge->flow_control = FLOW_MODE_REM_SEND;
-		else if ((lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
-			skge->flow_control = FLOW_MODE_LOC_SEND;
+		if ((skge->flow_control == FLOW_MODE_SYMMETRIC ||
+		     skge->flow_control == FLOW_MODE_SYM_OR_REM) &&
+		    (lpa & PHY_X_P_SYM_MD))
+			skge->flow_status = FLOW_STAT_SYMMETRIC;
+		else if (skge->flow_control == FLOW_MODE_SYM_OR_REM &&
+			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_ASYM_MD)
+			/* Enable PAUSE receive, disable PAUSE transmit */
+			skge->flow_status = FLOW_STAT_REM_SEND;
+		else if (skge->flow_control == FLOW_MODE_LOC_SEND &&
+			 (lpa & PHY_X_RS_PAUSE) == PHY_X_P_BOTH_MD)
+			/* Disable PAUSE receive, enable PAUSE transmit */
+			skge->flow_status = FLOW_STAT_LOC_SEND;
 		else
-			skge->flow_control = FLOW_MODE_NONE;
+			skge->flow_status = FLOW_STAT_NONE;
 
 		skge->speed = SPEED_1000;
 	}
@@ -1568,6 +1599,10 @@ static void genesis_mac_intr(struct skge_hw *hw, int port)
 		printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
 		       skge->netdev->name, status);
 
+	if (hw->phy_type == SK_PHY_XMAC &&
+	    (status & (XM_IS_INP_ASS | XM_IS_LIPA_RC)))
+		xm_link_down(hw, port);
+
 	if (status & XM_IS_TXF_UR) {
 		xm_write32(hw, port, XM_MODE, XM_MD_FTF);
 		++skge->net_stats.tx_fifo_errors;
@@ -1582,7 +1617,7 @@ static void genesis_link_up(struct skge_port *skge)
 {
 	struct skge_hw *hw = skge->hw;
 	int port = skge->port;
-	u16 cmd;
+	u16 cmd, msk;
 	u32 mode;
 
 	cmd = xm_read16(hw, port, XM_MMU_CMD);
@@ -1591,8 +1626,8 @@ static void genesis_link_up(struct skge_port *skge)
 	 * enabling pause frame reception is required for 1000BT
 	 * because the XMAC is not reset if the link is going down
	 */
-	if (skge->flow_control == FLOW_MODE_NONE ||
-	    skge->flow_control == FLOW_MODE_LOC_SEND)
+	if (skge->flow_status == FLOW_STAT_NONE ||
+	    skge->flow_status == FLOW_STAT_LOC_SEND)
 		/* Disable Pause Frame Reception */
 		cmd |= XM_MMU_IGN_PF;
 	else
@@ -1602,8 +1637,8 @@ static void genesis_link_up(struct skge_port *skge)
 	xm_write16(hw, port, XM_MMU_CMD, cmd);
 
 	mode = xm_read32(hw, port, XM_MODE);
-	if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
-	    skge->flow_control == FLOW_MODE_LOC_SEND) {
+	if (skge->flow_status == FLOW_STAT_SYMMETRIC ||
+	    skge->flow_status == FLOW_STAT_LOC_SEND) {
 		/*
		 * Configure Pause Frame Generation
		 * Use internal and external Pause Frame Generation.
@@ -1631,7 +1666,11 @@ static void genesis_link_up(struct skge_port *skge)
 	}
 
 	xm_write32(hw, port, XM_MODE, mode);
-	xm_write16(hw, port, XM_IMSK, XM_DEF_MSK);
+
+	msk = XM_DEF_MSK;
+	if (hw->phy_type != SK_PHY_XMAC)
+		msk |= XM_IS_INP_ASS;	/* disable GP0 interrupt bit */
+
+	xm_write16(hw, port, XM_IMSK, msk);
 	xm_read16(hw, port, XM_ISRC);
 
 	/* get MMU Command Reg. */
@@ -1779,11 +1818,17 @@ static void yukon_init(struct skge_hw *hw, int port)
 				adv |= PHY_M_AN_10_FD;
 			if (skge->advertising & ADVERTISED_10baseT_Half)
 				adv |= PHY_M_AN_10_HD;
-		} else	/* special defines for FIBER (88E1011S only) */
-			adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
 
 			/* Set Flow-control capabilities */
 			adv |= phy_pause_map[skge->flow_control];
+		} else {
+			if (skge->advertising & ADVERTISED_1000baseT_Full)
+				adv |= PHY_M_AN_1000X_AFD;
+			if (skge->advertising & ADVERTISED_1000baseT_Half)
+				adv |= PHY_M_AN_1000X_AHD;
+
+			adv |= fiber_pause_map[skge->flow_control];
+		}
 
 		/* Restart Auto-negotiation */
 		ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
@@ -1917,6 +1962,11 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
 	case FLOW_MODE_LOC_SEND:
 		/* disable Rx flow-control */
 		reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
+		break;
+	case FLOW_MODE_SYMMETRIC:
+	case FLOW_MODE_SYM_OR_REM:
+		/* enable Tx & Rx flow-control */
+		break;
 	}
 
 	gma_write16(hw, port, GM_GP_CTRL, reg);
@@ -2111,13 +2161,11 @@ static void yukon_link_down(struct skge_port *skge)
 	ctrl &= ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA);
 	gma_write16(hw, port, GM_GP_CTRL, ctrl);
 
-	if (skge->flow_control == FLOW_MODE_REM_SEND) {
+	if (skge->flow_status == FLOW_STAT_REM_SEND) {
+		ctrl = gm_phy_read(hw, port, PHY_MARV_AUNE_ADV);
+		ctrl |= PHY_M_AN_ASP;
 		/* restore Asymmetric Pause bit */
-		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
-			     gm_phy_read(hw, port,
-					 PHY_MARV_AUNE_ADV)
-			     | PHY_M_AN_ASP);
+		gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, ctrl);
 	}
 
 	yukon_reset(hw, port);
@@ -2164,19 +2212,19 @@ static void yukon_phy_intr(struct skge_port *skge)
 		/* We are using IEEE 802.3z/D5.0 Table 37-4 */
 		switch (phystat & PHY_M_PS_PAUSE_MSK) {
 		case PHY_M_PS_PAUSE_MSK:
-			skge->flow_control = FLOW_MODE_SYMMETRIC;
+			skge->flow_status = FLOW_STAT_SYMMETRIC;
 			break;
 		case PHY_M_PS_RX_P_EN:
-			skge->flow_control = FLOW_MODE_REM_SEND;
+			skge->flow_status = FLOW_STAT_REM_SEND;
 			break;
 		case PHY_M_PS_TX_P_EN:
-			skge->flow_control = FLOW_MODE_LOC_SEND;
+			skge->flow_status = FLOW_STAT_LOC_SEND;
 			break;
 		default:
-			skge->flow_control = FLOW_MODE_NONE;
+			skge->flow_status = FLOW_STAT_NONE;
 		}
 
-		if (skge->flow_control == FLOW_MODE_NONE ||
+		if (skge->flow_status == FLOW_STAT_NONE ||
 		    (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
 			skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
 		else
@@ -3399,7 +3447,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port,
 	/* Auto speed and flow control */
 	skge->autoneg = AUTONEG_ENABLE;
-	skge->flow_control = FLOW_MODE_SYMMETRIC;
+	skge->flow_control = FLOW_MODE_SYM_OR_REM;
 	skge->duplex = -1;
 	skge->speed = -1;
 	skge->advertising = skge_supported_modes(hw);
...
@@ -2195,7 +2195,8 @@ enum {
 	XM_IS_RX_COMP	= 1<<0,	/* Bit  0:	Frame Rx Complete */
 };
 
-#define XM_DEF_MSK	(~(XM_IS_RXC_OV | XM_IS_TXC_OV | XM_IS_RXF_OV | XM_IS_TXF_UR))
+#define XM_DEF_MSK	(~(XM_IS_INP_ASS | XM_IS_LIPA_RC | \
+			   XM_IS_RXF_OV | XM_IS_TXF_UR))
 
 /*	XM_HW_CFG	16 bit r/w	Hardware Config Register */
@@ -2426,13 +2427,24 @@ struct skge_hw {
 	struct mutex	     phy_mutex;
 };
 
-enum {
-	FLOW_MODE_NONE 		= 0, /* No Flow-Control */
-	FLOW_MODE_LOC_SEND	= 1, /* Local station sends PAUSE */
-	FLOW_MODE_REM_SEND	= 2, /* Symmetric or just remote */
+enum pause_control {
+	FLOW_MODE_NONE 		= 1, /* No Flow-Control */
+	FLOW_MODE_LOC_SEND	= 2, /* Local station sends PAUSE */
 	FLOW_MODE_SYMMETRIC	= 3, /* Both stations may send PAUSE */
+	FLOW_MODE_SYM_OR_REM	= 4, /* Both stations may send PAUSE or
+				      * just the remote station may send PAUSE
+				      */
+};
+
+enum pause_status {
+	FLOW_STAT_INDETERMINATED=0,	/* indeterminated */
+	FLOW_STAT_NONE,			/* No Flow Control */
+	FLOW_STAT_REM_SEND,		/* Remote Station sends PAUSE */
+	FLOW_STAT_LOC_SEND,		/* Local station sends PAUSE */
+	FLOW_STAT_SYMMETRIC,		/* Both station may send PAUSE */
 };
 
 struct skge_port {
 	u32		     msg_enable;
 	struct skge_hw	     *hw;
@@ -2445,9 +2457,10 @@ struct skge_port {
 	struct net_device_stats net_stats;
 
 	struct work_struct   link_thread;
+	enum pause_control   flow_control;
+	enum pause_status    flow_status;
 	u8		     rx_csum;
 	u8		     blink_on;
-	u8		     flow_control;
 	u8		     wol;
 	u8		     autoneg;	/* AUTONEG_ENABLE, AUTONEG_DISABLE */
 	u8		     duplex;	/* DUPLEX_HALF, DUPLEX_FULL */
...
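This header change is the heart of the skge flow-control rework: enum pause_control records what the administrator configured, while enum pause_status records what autonegotiation actually resolved, per the IEEE 802.3z Table 37-4 referenced in the comments above. A hedged sketch of that resolution logic, with simplified names rather than the driver's register masks:

```c
#include <stdbool.h>

enum pause_result { PAUSE_NONE, PAUSE_RX_ONLY, PAUSE_TX_ONLY, PAUSE_BOTH };

/* Resolve pause per IEEE 802.3 Table 37-4 from the local and
 * link-partner PAUSE/ASM_DIR advertisement bits (simplified). */
static enum pause_result resolve_pause(bool loc_pause, bool loc_asym,
				       bool lp_pause, bool lp_asym)
{
	if (loc_pause && lp_pause)
		return PAUSE_BOTH;      /* symmetric */
	if (!loc_pause && loc_asym && lp_pause && lp_asym)
		return PAUSE_TX_ONLY;   /* local station sends PAUSE */
	if (loc_pause && loc_asym && !lp_pause && lp_asym)
		return PAUSE_RX_ONLY;   /* remote station sends PAUSE */
	return PAUSE_NONE;
}
```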
@@ -683,7 +683,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
 	sky2_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
 
 	if (hw->chip_id == CHIP_ID_YUKON_EC_U) {
-		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
+		sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 512/8);
 		sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
 		if (hw->dev[port]->mtu > ETH_DATA_LEN) {
 			/* set Tx GMAC FIFO Almost Empty Threshold */
@@ -1907,7 +1907,7 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
 		pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr,
 					       length, PCI_DMA_FROMDEVICE);
 		re->skb->ip_summed = CHECKSUM_NONE;
-		__skb_put(skb, length);
+		skb_put(skb, length);
 	}
 	return skb;
 }
@@ -1970,7 +1970,7 @@ static struct sk_buff *receive_new(struct sky2_port *sky2,
 	if (skb_shinfo(skb)->nr_frags)
 		skb_put_frags(skb, hdr_space, length);
 	else
-		skb_put(skb, hdr_space);
+		skb_put(skb, length);
 	return skb;
 }
@@ -2220,8 +2220,7 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 		/* PCI-Express uncorrectable Error occurred */
 		u32 pex_err;
 
-		pex_err = sky2_pci_read32(hw,
-					  hw->err_cap + PCI_ERR_UNCOR_STATUS);
+		pex_err = sky2_pci_read32(hw, PEX_UNC_ERR_STAT);
 
 		if (net_ratelimit())
 			printk(KERN_ERR PFX "%s: pci express error (0x%x)\n",
@@ -2229,20 +2228,15 @@ static void sky2_hw_intr(struct sky2_hw *hw)
 		/* clear the interrupt */
 		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
-		sky2_pci_write32(hw,
-				 hw->err_cap + PCI_ERR_UNCOR_STATUS,
-				 0xffffffffUL);
+		sky2_pci_write32(hw, PEX_UNC_ERR_STAT,
+				 0xffffffffUL);
 		sky2_write32(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
 
-		/* In case of fatal error mask off to keep from getting stuck */
-		if (pex_err & (PCI_ERR_UNC_POISON_TLP | PCI_ERR_UNC_FCP
-			       | PCI_ERR_UNC_DLP)) {
+		if (pex_err & PEX_FATAL_ERRORS) {
 			u32 hwmsk = sky2_read32(hw, B0_HWE_IMSK);
 			hwmsk &= ~Y2_IS_PCI_EXP;
 			sky2_write32(hw, B0_HWE_IMSK, hwmsk);
 		}
 	}
 
 	if (status & Y2_HWE_L1_MASK)
@@ -2423,7 +2417,6 @@ static int sky2_reset(struct sky2_hw *hw)
 	u16 status;
 	u8 t8;
 	int i;
-	u32 msk;
 
 	sky2_write8(hw, B0_CTST, CS_RST_CLR);
@@ -2464,13 +2457,9 @@ static int sky2_reset(struct sky2_hw *hw)
 	sky2_write8(hw, B0_CTST, CS_MRST_CLR);
 
 	/* clear any PEX errors */
-	if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP)) {
-		hw->err_cap = pci_find_ext_capability(hw->pdev, PCI_EXT_CAP_ID_ERR);
-		if (hw->err_cap)
-			sky2_pci_write32(hw,
-					 hw->err_cap + PCI_ERR_UNCOR_STATUS,
-					 0xffffffffUL);
-	}
+	if (pci_find_capability(hw->pdev, PCI_CAP_ID_EXP))
+		sky2_pci_write32(hw, PEX_UNC_ERR_STAT, 0xffffffffUL);
 
 	hw->pmd_type = sky2_read8(hw, B2_PMD_TYP);
 	hw->ports = 1;
@@ -2527,10 +2516,7 @@ static int sky2_reset(struct sky2_hw *hw)
 		sky2_write8(hw, RAM_BUFFER(i, B3_RI_RTO_XS2), SK_RI_TO_53);
 	}
 
-	msk = Y2_HWE_ALL_MASK;
-	if (!hw->err_cap)
-		msk &= ~Y2_IS_PCI_EXP;
-	sky2_write32(hw, B0_HWE_IMSK, msk);
+	sky2_write32(hw, B0_HWE_IMSK, Y2_HWE_ALL_MASK);
 
 	for (i = 0; i < hw->ports; i++)
 		sky2_gmac_reset(hw, i);
...
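The sky2 revert replaces the AER capability offset discovered via pci_find_ext_capability() with the chip's fixed, documented offset (PEX_UNC_ERR_STAT at 0x104, defined in sky2.h below). Either way, the uncorrectable-status register follows standard write-1-to-clear semantics, which is why 0xffffffffUL is written back to acknowledge every latched error. A tiny model of W1C behavior:

```c
#include <assert.h>
#include <stdint.h>

/* Model of a write-1-to-clear (W1C) status register: writing a 1
 * to a bit clears it; writing 0 leaves it alone. */
static uint32_t w1c_write(uint32_t reg, uint32_t val)
{
	return reg & ~val;
}

int main(void)
{
	uint32_t status = 0x00004010; /* two error bits latched */

	status = w1c_write(status, 0xffffffffUL); /* acknowledge everything */
	assert(status == 0);
	return 0;
}
```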
@@ -6,15 +6,24 @@
 #define ETH_JUMBO_MTU		9000	/* Maximum MTU supported */
 
-/* PCI device specific config registers */
+/* PCI config registers */
 enum {
 	PCI_DEV_REG1	= 0x40,
 	PCI_DEV_REG2	= 0x44,
+	PCI_DEV_STATUS  = 0x7c,
 	PCI_DEV_REG3	= 0x80,
 	PCI_DEV_REG4	= 0x84,
 	PCI_DEV_REG5    = 0x88,
 };
 
+enum {
+	PEX_DEV_CAP	= 0xe4,
+	PEX_DEV_CTRL	= 0xe8,
+	PEX_DEV_STA	= 0xea,
+	PEX_LNK_STAT	= 0xf2,
+	PEX_UNC_ERR_STAT= 0x104,
+};
+
 /* Yukon-2 */
 enum pci_dev_reg_1 {
 	PCI_Y2_PIG_ENA	 = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
@@ -63,6 +72,39 @@ enum pci_dev_reg_4 {
 				 PCI_STATUS_REC_MASTER_ABORT | \
 				 PCI_STATUS_REC_TARGET_ABORT | \
 				 PCI_STATUS_PARITY)
+
+enum pex_dev_ctrl {
+	PEX_DC_MAX_RRS_MSK	= 7<<12, /* Bit 14..12:	Max. Read Request Size */
+	PEX_DC_EN_NO_SNOOP	= 1<<11, /* Enable No Snoop */
+	PEX_DC_EN_AUX_POW	= 1<<10, /* Enable AUX Power */
+	PEX_DC_EN_PHANTOM	= 1<<9,	 /* Enable Phantom Functions */
+	PEX_DC_EN_EXT_TAG	= 1<<8,	 /* Enable Extended Tag Field */
+	PEX_DC_MAX_PLS_MSK	= 7<<5,	 /* Bit  7.. 5:	Max. Payload Size Mask */
+	PEX_DC_EN_REL_ORD	= 1<<4,	 /* Enable Relaxed Ordering */
+	PEX_DC_EN_UNS_RQ_RP	= 1<<3,	 /* Enable Unsupported Request Reporting */
+	PEX_DC_EN_FAT_ER_RP	= 1<<2,	 /* Enable Fatal Error Reporting */
+	PEX_DC_EN_NFA_ER_RP	= 1<<1,	 /* Enable Non-Fatal Error Reporting */
+	PEX_DC_EN_COR_ER_RP	= 1<<0,	 /* Enable Correctable Error Reporting */
+};
+#define  PEX_DC_MAX_RD_RQ_SIZE(x) (((x)<<12) & PEX_DC_MAX_RRS_MSK)
+
+/* PEX_UNC_ERR_STAT	 PEX Uncorrectable Errors Status Register (Yukon-2) */
+enum pex_err {
+	PEX_UNSUP_REQ	= 1<<20, /* Unsupported Request Error */
+	PEX_MALFOR_TLP	= 1<<18, /* Malformed TLP */
+	PEX_UNEXP_COMP	= 1<<16, /* Unexpected Completion */
+	PEX_COMP_TO	= 1<<14, /* Completion Timeout */
+	PEX_FLOW_CTRL_P	= 1<<13, /* Flow Control Protocol Error */
+	PEX_POIS_TLP	= 1<<12, /* Poisoned TLP */
+	PEX_DATA_LINK_P = 1<<4,	 /* Data Link Protocol Error */
+
+	PEX_FATAL_ERRORS= (PEX_MALFOR_TLP | PEX_FLOW_CTRL_P | PEX_DATA_LINK_P),
+};
+
 enum csr_regs {
 	B0_RAP		= 0x0000,
 	B0_CTST		= 0x0004,
@@ -1836,7 +1878,6 @@ struct sky2_hw {
 	struct net_device    *dev[2];
 
 	int		     pm_cap;
-	int		     err_cap;
 	u8		     chip_id;
 	u8		     chip_rev;
 	u8		     pmd_type;
...
@@ -398,6 +398,24 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r,
 
 #define SMC_IRQ_FLAGS		(0)
 
+#elif defined(CONFIG_ARCH_VERSATILE)
+
+#define SMC_CAN_USE_8BIT	1
+#define SMC_CAN_USE_16BIT	1
+#define SMC_CAN_USE_32BIT	1
+#define SMC_NOWAIT		1
+
+#define SMC_inb(a, r)		readb((a) + (r))
+#define SMC_inw(a, r)		readw((a) + (r))
+#define SMC_inl(a, r)		readl((a) + (r))
+#define SMC_outb(v, a, r)	writeb(v, (a) + (r))
+#define SMC_outw(v, a, r)	writew(v, (a) + (r))
+#define SMC_outl(v, a, r)	writel(v, (a) + (r))
+#define SMC_insl(a, r, p, l)	readsl((a) + (r), p, l)
+#define SMC_outsl(a, r, p, l)	writesl((a) + (r), p, l)
+
+#define SMC_IRQ_FLAGS		(0)
+
 #else
 
 #define SMC_CAN_USE_8BIT	1
...
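Each platform block in smc91x.h tells the core driver which bus widths are usable and which accessors implement them; on Versatile the chip is plainly memory-mapped, so the accessors collapse to the readb()/writeb() family. An illustrative sketch of how the core consumes such accessors (smc_read_reg()/smc_write_reg() are hypothetical helper names, not functions from the driver):

```c
#include <linux/io.h>
#include <linux/types.h>

/* Same shape as the Versatile definitions above. */
#define SMC_inw(a, r)		readw((a) + (r))
#define SMC_outw(v, a, r)	writew(v, (a) + (r))

static u16 smc_read_reg(void __iomem *ioaddr, int reg)
{
	return SMC_inw(ioaddr, reg);	/* 16-bit MMIO read at base+offset */
}

static void smc_write_reg(void __iomem *ioaddr, int reg, u16 val)
{
	SMC_outw(val, ioaddr, reg);	/* 16-bit MMIO write at base+offset */
}
```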
...@@ -55,12 +55,13 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \ ...@@ -55,12 +55,13 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
"<Jens.Osterkamp@de.ibm.com>"); "<Jens.Osterkamp@de.ibm.com>");
MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver"); MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_VERSION(VERSION);
static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT; static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT; static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
module_param(rx_descriptors, int, 0644); module_param(rx_descriptors, int, 0444);
module_param(tx_descriptors, int, 0644); module_param(tx_descriptors, int, 0444);
MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \ MODULE_PARM_DESC(rx_descriptors, "number of descriptors used " \
"in rx chains"); "in rx chains");
...@@ -300,7 +301,7 @@ static int ...@@ -300,7 +301,7 @@ static int
spider_net_init_chain(struct spider_net_card *card, spider_net_init_chain(struct spider_net_card *card,
struct spider_net_descr_chain *chain, struct spider_net_descr_chain *chain,
struct spider_net_descr *start_descr, struct spider_net_descr *start_descr,
int direction, int no) int no)
{ {
int i; int i;
struct spider_net_descr *descr; struct spider_net_descr *descr;
...@@ -315,7 +316,7 @@ spider_net_init_chain(struct spider_net_card *card, ...@@ -315,7 +316,7 @@ spider_net_init_chain(struct spider_net_card *card,
buf = pci_map_single(card->pdev, descr, buf = pci_map_single(card->pdev, descr,
SPIDER_NET_DESCR_SIZE, SPIDER_NET_DESCR_SIZE,
direction); PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(buf)) if (pci_dma_mapping_error(buf))
goto iommu_error; goto iommu_error;
...@@ -329,11 +330,6 @@ spider_net_init_chain(struct spider_net_card *card, ...@@ -329,11 +330,6 @@ spider_net_init_chain(struct spider_net_card *card,
(descr-1)->next = start_descr; (descr-1)->next = start_descr;
start_descr->prev = descr-1; start_descr->prev = descr-1;
descr = start_descr;
if (direction == PCI_DMA_FROMDEVICE)
for (i=0; i < no; i++, descr++)
descr->next_descr_addr = descr->next->bus_addr;
spin_lock_init(&chain->lock); spin_lock_init(&chain->lock);
chain->head = start_descr; chain->head = start_descr;
chain->tail = start_descr; chain->tail = start_descr;
...@@ -346,7 +342,7 @@ spider_net_init_chain(struct spider_net_card *card, ...@@ -346,7 +342,7 @@ spider_net_init_chain(struct spider_net_card *card,
if (descr->bus_addr) if (descr->bus_addr)
pci_unmap_single(card->pdev, descr->bus_addr, pci_unmap_single(card->pdev, descr->bus_addr,
SPIDER_NET_DESCR_SIZE, SPIDER_NET_DESCR_SIZE,
direction); PCI_DMA_BIDIRECTIONAL);
return -ENOMEM; return -ENOMEM;
} }
...@@ -362,15 +358,15 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card) ...@@ -362,15 +358,15 @@ spider_net_free_rx_chain_contents(struct spider_net_card *card)
struct spider_net_descr *descr; struct spider_net_descr *descr;
descr = card->rx_chain.head; descr = card->rx_chain.head;
while (descr->next != card->rx_chain.head) { do {
if (descr->skb) { if (descr->skb) {
dev_kfree_skb(descr->skb); dev_kfree_skb(descr->skb);
pci_unmap_single(card->pdev, descr->buf_addr, pci_unmap_single(card->pdev, descr->buf_addr,
SPIDER_NET_MAX_FRAME, SPIDER_NET_MAX_FRAME,
PCI_DMA_FROMDEVICE); PCI_DMA_BIDIRECTIONAL);
} }
descr = descr->next; descr = descr->next;
} } while (descr != card->rx_chain.head);
} }
/** /**
...@@ -645,26 +641,41 @@ static int ...@@ -645,26 +641,41 @@ static int
spider_net_prepare_tx_descr(struct spider_net_card *card, spider_net_prepare_tx_descr(struct spider_net_card *card,
struct sk_buff *skb) struct sk_buff *skb)
{ {
struct spider_net_descr *descr = card->tx_chain.head; struct spider_net_descr *descr;
dma_addr_t buf; dma_addr_t buf;
unsigned long flags;
int length;
buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); length = skb->len;
if (length < ETH_ZLEN) {
if (skb_pad(skb, ETH_ZLEN-length))
return 0;
length = ETH_ZLEN;
}
buf = pci_map_single(card->pdev, skb->data, length, PCI_DMA_TODEVICE);
if (pci_dma_mapping_error(buf)) { if (pci_dma_mapping_error(buf)) {
if (netif_msg_tx_err(card) && net_ratelimit()) if (netif_msg_tx_err(card) && net_ratelimit())
pr_err("could not iommu-map packet (%p, %i). " pr_err("could not iommu-map packet (%p, %i). "
"Dropping packet\n", skb->data, skb->len); "Dropping packet\n", skb->data, length);
card->spider_stats.tx_iommu_map_error++; card->spider_stats.tx_iommu_map_error++;
return -ENOMEM; return -ENOMEM;
} }
spin_lock_irqsave(&card->tx_chain.lock, flags);
descr = card->tx_chain.head;
card->tx_chain.head = descr->next;
descr->buf_addr = buf; descr->buf_addr = buf;
descr->buf_size = skb->len; descr->buf_size = length;
descr->next_descr_addr = 0; descr->next_descr_addr = 0;
descr->skb = skb; descr->skb = skb;
descr->data_status = 0; descr->data_status = 0;
descr->dmac_cmd_status = descr->dmac_cmd_status =
SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS;
spin_unlock_irqrestore(&card->tx_chain.lock, flags);
if (skb->protocol == htons(ETH_P_IP)) if (skb->protocol == htons(ETH_P_IP))
switch (skb->nh.iph->protocol) { switch (skb->nh.iph->protocol) {
case IPPROTO_TCP: case IPPROTO_TCP:
...@@ -675,32 +686,51 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, ...@@ -675,32 +686,51 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
break; break;
} }
/* Chain the bus address, so that the DMA engine finds this descr. */
descr->prev->next_descr_addr = descr->bus_addr; descr->prev->next_descr_addr = descr->bus_addr;
card->netdev->trans_start = jiffies; /* set netdev watchdog timer */
return 0; return 0;
} }
/** static int
* spider_net_release_tx_descr - processes a used tx descriptor spider_net_set_low_watermark(struct spider_net_card *card)
* @card: card structure
* @descr: descriptor to release
*
* releases a used tx descriptor (unmapping, freeing of skb)
*/
static inline void
spider_net_release_tx_descr(struct spider_net_card *card)
{ {
unsigned long flags;
int status;
int cnt=0;
int i;
struct spider_net_descr *descr = card->tx_chain.tail; struct spider_net_descr *descr = card->tx_chain.tail;
struct sk_buff *skb;
card->tx_chain.tail = card->tx_chain.tail->next; /* Measure the length of the queue. Measurement does not
descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE; * need to be precise -- does not need a lock. */
while (descr != card->tx_chain.head) {
status = descr->dmac_cmd_status & SPIDER_NET_DESCR_NOT_IN_USE;
if (status == SPIDER_NET_DESCR_NOT_IN_USE)
break;
descr = descr->next;
cnt++;
}
/* unmap the skb */ /* If TX queue is short, don't even bother with interrupts */
skb = descr->skb; if (cnt < card->num_tx_desc/4)
pci_unmap_single(card->pdev, descr->buf_addr, skb->len, return cnt;
PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb); /* Set low-watermark 3/4th's of the way into the queue. */
descr = card->tx_chain.tail;
cnt = (cnt*3)/4;
for (i=0;i<cnt; i++)
descr = descr->next;
/* Set the new watermark, clear the old watermark */
spin_lock_irqsave(&card->tx_chain.lock, flags);
descr->dmac_cmd_status |= SPIDER_NET_DESCR_TXDESFLG;
if (card->low_watermark && card->low_watermark != descr)
card->low_watermark->dmac_cmd_status =
card->low_watermark->dmac_cmd_status & ~SPIDER_NET_DESCR_TXDESFLG;
card->low_watermark = descr;
spin_unlock_irqrestore(&card->tx_chain.lock, flags);
return cnt;
} }
/** /**
...@@ -719,21 +749,29 @@ static int ...@@ -719,21 +749,29 @@ static int
spider_net_release_tx_chain(struct spider_net_card *card, int brutal) spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
{ {
struct spider_net_descr_chain *chain = &card->tx_chain; struct spider_net_descr_chain *chain = &card->tx_chain;
struct spider_net_descr *descr;
struct sk_buff *skb;
u32 buf_addr;
unsigned long flags;
int status; int status;
spider_net_read_reg(card, SPIDER_NET_GDTDMACCNTR);
while (chain->tail != chain->head) { while (chain->tail != chain->head) {
status = spider_net_get_descr_status(chain->tail); spin_lock_irqsave(&chain->lock, flags);
descr = chain->tail;
status = spider_net_get_descr_status(descr);
switch (status) { switch (status) {
case SPIDER_NET_DESCR_COMPLETE: case SPIDER_NET_DESCR_COMPLETE:
card->netdev_stats.tx_packets++; card->netdev_stats.tx_packets++;
card->netdev_stats.tx_bytes += chain->tail->skb->len; card->netdev_stats.tx_bytes += descr->skb->len;
break; break;
case SPIDER_NET_DESCR_CARDOWNED: case SPIDER_NET_DESCR_CARDOWNED:
if (!brutal) if (!brutal) {
spin_unlock_irqrestore(&chain->lock, flags);
return 1; return 1;
}
/* fallthrough, if we release the descriptors /* fallthrough, if we release the descriptors
* brutally (then we don't care about * brutally (then we don't care about
* SPIDER_NET_DESCR_CARDOWNED) */ * SPIDER_NET_DESCR_CARDOWNED) */
...@@ -750,11 +788,25 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) ...@@ -750,11 +788,25 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
default: default:
card->netdev_stats.tx_dropped++; card->netdev_stats.tx_dropped++;
return 1; if (!brutal) {
spin_unlock_irqrestore(&chain->lock, flags);
return 1;
}
} }
spider_net_release_tx_descr(card);
}
chain->tail = descr->next;
descr->dmac_cmd_status |= SPIDER_NET_DESCR_NOT_IN_USE;
skb = descr->skb;
buf_addr = descr->buf_addr;
spin_unlock_irqrestore(&chain->lock, flags);
/* unmap the skb */
if (skb) {
int len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
pci_unmap_single(card->pdev, buf_addr, len, PCI_DMA_TODEVICE);
dev_kfree_skb(skb);
}
}
return 0; return 0;
} }
@@ -763,8 +815,12 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
  * @card: card structure
  * @descr: descriptor address to enable TX processing at
  *
- * spider_net_kick_tx_dma writes the current tx chain head as start address
- * of the tx descriptor chain and enables the transmission DMA engine
+ * This routine will start the transmit DMA running if
+ * it is not already running. This routine need only be
+ * called when queueing a new packet to an empty tx queue.
+ * Writes the current tx chain head as start address
+ * of the tx descriptor chain and enables the transmission
+ * DMA engine.
  */
 static inline void
 spider_net_kick_tx_dma(struct spider_net_card *card)
@@ -804,65 +860,43 @@ spider_net_kick_tx_dma(struct spider_net_card *card)
 static int
 spider_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
+	int cnt;
 	struct spider_net_card *card = netdev_priv(netdev);
 	struct spider_net_descr_chain *chain = &card->tx_chain;
-	struct spider_net_descr *descr = chain->head;
-	unsigned long flags;
-	int result;
-
-	spin_lock_irqsave(&chain->lock, flags);
 
 	spider_net_release_tx_chain(card, 0);
 
-	if (chain->head->next == chain->tail->prev) {
-		card->netdev_stats.tx_dropped++;
-		result = NETDEV_TX_LOCKED;
-		goto out;
-	}
-
-	if (spider_net_get_descr_status(descr) != SPIDER_NET_DESCR_NOT_IN_USE) {
+	if ((chain->head->next == chain->tail->prev) ||
+	   (spider_net_prepare_tx_descr(card, skb) != 0)) {
 		card->netdev_stats.tx_dropped++;
-		result = NETDEV_TX_LOCKED;
-		goto out;
+		netif_stop_queue(netdev);
+		return NETDEV_TX_BUSY;
 	}
 
-	if (spider_net_prepare_tx_descr(card, skb) != 0) {
-		card->netdev_stats.tx_dropped++;
-		result = NETDEV_TX_BUSY;
-		goto out;
-	}
-
-	result = NETDEV_TX_OK;
-	spider_net_kick_tx_dma(card);
-	card->tx_chain.head = card->tx_chain.head->next;
-
-out:
-	spin_unlock_irqrestore(&chain->lock, flags);
-	netif_wake_queue(netdev);
-	return result;
+	cnt = spider_net_set_low_watermark(card);
+	if (cnt < 5)
+		spider_net_kick_tx_dma(card);
+	return NETDEV_TX_OK;
 }
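The fullness test chain->head->next == chain->tail->prev stops accepting packets while a couple of descriptors of slack still remain between head and tail, so the two pointers can never collide on the circular chain. A toy ring demonstrating the same test; the structure and names are illustrative, not the driver's:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy circular doubly linked ring, mimicking the descriptor chain. */
    struct descr {
        struct descr *next, *prev;
        int in_use;
    };

    static struct descr *make_ring(int n)
    {
        struct descr *d = calloc(n, sizeof(*d));
        int i;

        for (i = 0; i < n; i++) {
            d[i].next = &d[(i + 1) % n];
            d[i].prev = &d[(i + n - 1) % n];
        }
        return d;
    }

    int main(void)
    {
        int n = 8, used = 0;
        struct descr *ring = make_ring(n);
        struct descr *head = ring, *tail = ring;

        /* Enqueue until the driver's fullness test fires. */
        while (head->next != tail->prev) {
            head->in_use = 1;
            head = head->next;
            used++;
        }
        printf("ring of %d stops accepting at %d in flight\n", n, used);
        free(ring);
        return 0;
    }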
 /**
  * spider_net_cleanup_tx_ring - cleans up the TX ring
  * @card: card structure
  *
- * spider_net_cleanup_tx_ring is called by the tx_timer (as we don't use
- * interrupts to cleanup our TX ring) and returns sent packets to the stack
- * by freeing them
+ * spider_net_cleanup_tx_ring is called by either the tx_timer
+ * or from the NAPI polling routine.
+ * This routine releases resources associated with transmitted
+ * packets, including updating the queue tail pointer.
  */
 static void
 spider_net_cleanup_tx_ring(struct spider_net_card *card)
 {
-	unsigned long flags;
-
-	spin_lock_irqsave(&card->tx_chain.lock, flags);
-
 	if ((spider_net_release_tx_chain(card, 0) != 0) &&
-	    (card->netdev->flags & IFF_UP))
+	    (card->netdev->flags & IFF_UP)) {
 		spider_net_kick_tx_dma(card);
-
-	spin_unlock_irqrestore(&card->tx_chain.lock, flags);
+		netif_wake_queue(card->netdev);
+	}
 }
 /**
@@ -1053,6 +1087,7 @@ spider_net_poll(struct net_device *netdev, int *budget)
 	int packets_to_do, packets_done = 0;
 	int no_more_packets = 0;
 
+	spider_net_cleanup_tx_ring(card);
 	packets_to_do = min(*budget, netdev->quota);
 
 	while (packets_to_do) {
@@ -1243,12 +1278,15 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 	case SPIDER_NET_PHYINT:
 	case SPIDER_NET_GMAC2INT:
 	case SPIDER_NET_GMAC1INT:
-	case SPIDER_NET_GIPSINT:
 	case SPIDER_NET_GFIFOINT:
 	case SPIDER_NET_DMACINT:
 	case SPIDER_NET_GSYSINT:
 		break; */
 
+	case SPIDER_NET_GIPSINT:
+		show_error = 0;
+		break;
+
 	case SPIDER_NET_GPWOPCMPINT:
 		/* PHY write operation completed */
 		show_error = 0;
@@ -1307,9 +1345,10 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 	case SPIDER_NET_GDTDCEINT:
 		/* chain end. If a descriptor should be sent, kick off
 		 * tx dma
-		if (card->tx_chain.tail == card->tx_chain.head)
+		if (card->tx_chain.tail != card->tx_chain.head)
 			spider_net_kick_tx_dma(card);
-		show_error = 0; */
+		*/
+		show_error = 0;
 		break;
 
 	/* case SPIDER_NET_G1TMCNTINT: not used. print a message */
@@ -1354,7 +1393,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 		if (netif_msg_intr(card))
 			pr_err("got descriptor chain end interrupt, "
 			       "restarting DMAC %c.\n",
-			       'D'+i-SPIDER_NET_GDDDCEINT);
+			       'D'-(i-SPIDER_NET_GDDDCEINT)/3);
 		spider_net_refill_rx_chain(card);
 		spider_net_enable_rxdmac(card);
 		show_error = 0;
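The old expression printed 'D', 'E', 'F', and so on for successive interrupt bits; the fixed one divides by three because each DMA chain appears to own three consecutive bits in this stretch of the enum, lettered downward from D. A demo with a made-up value for SPIDER_NET_GDDDCEINT (only the relative spacing matters, and the loop range assumes four chains, D down to A):

    #include <stdio.h>

    /* Made-up enum value, for illustration only. */
    #define SPIDER_NET_GDDDCEINT 24

    int main(void)
    {
        int i;

        for (i = SPIDER_NET_GDDDCEINT; i < SPIDER_NET_GDDDCEINT + 12; i++) {
            char old_ch = 'D' + i - SPIDER_NET_GDDDCEINT;       /* D,E,F,... */
            char new_ch = 'D' - (i - SPIDER_NET_GDDDCEINT) / 3; /* D,D,D,C,... */
            printf("bit %2d: old formula %c, fixed formula %c\n",
                   i, old_ch, new_ch);
        }
        return 0;
    }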
@@ -1423,8 +1462,9 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
 	}
 
 	if ((show_error) && (netif_msg_intr(card)))
-		pr_err("Got error interrupt, GHIINT0STS = 0x%08x, "
+		pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, "
 		       "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
+		       card->netdev->name,
 		       status_reg, error_reg1, error_reg2);
 
 	/* clear interrupt sources */
@@ -1460,6 +1500,8 @@ spider_net_interrupt(int irq, void *ptr)
 		spider_net_rx_irq_off(card);
 		netif_rx_schedule(netdev);
 	}
+	if (status_reg & SPIDER_NET_TXINT)
+		netif_rx_schedule(netdev);
 
 	if (status_reg & SPIDER_NET_ERRINT )
 		spider_net_handle_error_irq(card, status_reg);
@@ -1599,7 +1641,7 @@ spider_net_enable_card(struct spider_net_card *card)
 			     SPIDER_NET_INT2_MASK_VALUE);
 
 	spider_net_write_reg(card, SPIDER_NET_GDTDMACCNTR,
-			     SPIDER_NET_GDTDCEIDIS);
+			     SPIDER_NET_GDTBSTA | SPIDER_NET_GDTDCEIDIS);
 }
 /**
@@ -1615,17 +1657,26 @@ int
 spider_net_open(struct net_device *netdev)
 {
 	struct spider_net_card *card = netdev_priv(netdev);
-	int result;
+	struct spider_net_descr *descr;
+	int i, result;
 
 	result = -ENOMEM;
 	if (spider_net_init_chain(card, &card->tx_chain, card->descr,
-			PCI_DMA_TODEVICE, card->tx_desc))
+			card->num_tx_desc))
 		goto alloc_tx_failed;
+	card->low_watermark = NULL;
 
+	/* rx_chain is after tx_chain, so offset is descr + tx_count */
 	if (spider_net_init_chain(card, &card->rx_chain,
-			card->descr + card->rx_desc,
-			PCI_DMA_FROMDEVICE, card->rx_desc))
+			card->descr + card->num_tx_desc,
+			card->num_rx_desc))
 		goto alloc_rx_failed;
 
+	descr = card->rx_chain.head;
+	for (i=0; i < card->num_rx_desc; i++, descr++)
+		descr->next_descr_addr = descr->next->bus_addr;
+
 	/* allocate rx skbs */
 	if (spider_net_alloc_rx_skbs(card))
 		goto alloc_skbs_failed;
@@ -1878,10 +1929,7 @@ spider_net_stop(struct net_device *netdev)
 	spider_net_disable_rxdmac(card);
 
 	/* release chains */
-	if (spin_trylock(&card->tx_chain.lock)) {
-		spider_net_release_tx_chain(card, 1);
-		spin_unlock(&card->tx_chain.lock);
-	}
+	spider_net_release_tx_chain(card, 1);
 
 	spider_net_free_chain(card, &card->tx_chain);
 	spider_net_free_chain(card, &card->rx_chain);
@@ -2012,8 +2060,8 @@ spider_net_setup_netdev(struct spider_net_card *card)
 	card->options.rx_csum = SPIDER_NET_RX_CSUM_DEFAULT;
 
-	card->tx_desc = tx_descriptors;
-	card->rx_desc = rx_descriptors;
+	card->num_tx_desc = tx_descriptors;
+	card->num_rx_desc = rx_descriptors;
 
 	spider_net_setup_netdev_ops(netdev);
@@ -2252,6 +2300,8 @@ static struct pci_driver spider_net_driver = {
  */
 static int __init spider_net_init(void)
 {
+	printk(KERN_INFO "Spidernet version %s.\n", VERSION);
+
 	if (rx_descriptors < SPIDER_NET_RX_DESCRIPTORS_MIN) {
 		rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_MIN;
 		pr_info("adjusting rx descriptors to %i.\n", rx_descriptors);
...
@@ -24,6 +24,8 @@
 #ifndef _SPIDER_NET_H
 #define _SPIDER_NET_H
 
+#define VERSION "1.1 A"
+
 #include "sungem_phy.h"
 
 extern int spider_net_stop(struct net_device *netdev);
@@ -47,7 +49,7 @@ extern char spider_net_driver_name[];
 #define SPIDER_NET_TX_DESCRIPTORS_MIN 16
 #define SPIDER_NET_TX_DESCRIPTORS_MAX 512
 
-#define SPIDER_NET_TX_TIMER 20
+#define SPIDER_NET_TX_TIMER (HZ/5)
 
 #define SPIDER_NET_RX_CSUM_DEFAULT 1
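Changing SPIDER_NET_TX_TIMER from a bare jiffy count to (HZ/5) pins the TX cleanup period at 200 ms regardless of the kernel's HZ setting; the raw 20 happened to mean 200 ms only on HZ=100 builds. The arithmetic, checked standalone:

    #include <stdio.h>

    /* A timer period in jiffies must scale with HZ to keep constant
     * wall-clock time: (HZ/5) is always 200 ms, a bare "20" is not. */
    int main(void)
    {
        int hz_values[] = { 100, 250, 1000 };
        int i;

        for (i = 0; i < 3; i++) {
            int hz = hz_values[i];
            printf("HZ=%4d: old 20 jiffies = %3d ms, HZ/5 = %d ms\n",
                   hz, 20 * 1000 / hz, (hz / 5) * 1000 / hz);
        }
        return 0;
    }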
@@ -189,7 +191,9 @@ extern char spider_net_driver_name[];
 #define SPIDER_NET_MACMODE_VALUE 0x00000001
 #define SPIDER_NET_BURSTLMT_VALUE 0x00000200 /* about 16 us */
 
-/* 1(0) enable r/tx dma
+/* DMAC control register GDMACCNTR
+ *
+ * 1(0) enable r/tx dma
  * 0000000 fixed to 0
  *
  * 000000 fixed to 0
@@ -198,6 +202,7 @@ extern char spider_net_driver_name[];
  *
  * 000000 fixed to 0
  * 00 burst alignment: 128 bytes
+ * 11 burst alignment: 1024 bytes
  *
  * 00000 fixed to 0
  * 0 descr writeback size 32 bytes
@@ -208,10 +213,13 @@ extern char spider_net_driver_name[];
 #define SPIDER_NET_DMA_RX_VALUE 0x80000000
 #define SPIDER_NET_DMA_RX_FEND_VALUE 0x00030003
 
 /* to set TX_DMA_EN */
 #define SPIDER_NET_TX_DMA_EN 0x80000000
-#define SPIDER_NET_GDTDCEIDIS 0x00000002
-#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
-				SPIDER_NET_GDTDCEIDIS
+#define SPIDER_NET_GDTBSTA 0x00000300
+#define SPIDER_NET_GDTDCEIDIS 0x00000002
+#define SPIDER_NET_DMA_TX_VALUE SPIDER_NET_TX_DMA_EN | \
+				SPIDER_NET_GDTBSTA | \
+				SPIDER_NET_GDTDCEIDIS
 #define SPIDER_NET_DMA_TX_FEND_VALUE 0x00030003
 
 /* SPIDER_NET_UA_DESCR_VALUE is OR'ed with the unicast address */
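With SPIDER_NET_GDTBSTA OR'ed into the mask, the value written to the TX DMA control register becomes 0x80000302: DMA enable in the top bit, the "11" burst-alignment field documented in the comment above (1024 bytes, bits 9:8), and the chain-end-interrupt-disable bit. Verified standalone:

    #include <stdio.h>

    #define SPIDER_NET_TX_DMA_EN   0x80000000
    #define SPIDER_NET_GDTBSTA     0x00000300 /* burst alignment: 1024 bytes */
    #define SPIDER_NET_GDTDCEIDIS  0x00000002 /* chain-end interrupt disable */

    int main(void)
    {
        unsigned val = SPIDER_NET_TX_DMA_EN | SPIDER_NET_GDTBSTA |
                       SPIDER_NET_GDTDCEIDIS;

        printf("GDTDMACCNTR TX value: 0x%08x\n", val); /* 0x80000302 */
        return 0;
    }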
@@ -320,13 +328,10 @@ enum spider_net_int2_status {
 	SPIDER_NET_GRISPDNGINT
 };
 
-#define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GTTEDINT) | \
-	(1 << SPIDER_NET_GDTDCEINT) | \
-	(1 << SPIDER_NET_GDTFDCINT) )
+#define SPIDER_NET_TXINT ( (1 << SPIDER_NET_GDTFDCINT) )
 
-/* we rely on flagged descriptor interrupts*/
-#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) | \
-	(1 << SPIDER_NET_GRMFLLINT) )
+/* We rely on flagged descriptor interrupts */
+#define SPIDER_NET_RXINT ( (1 << SPIDER_NET_GDAFDCINT) )
 
 #define SPIDER_NET_ERRINT ( 0xffffffff & \
 	(~SPIDER_NET_TXINT) & \
@@ -349,6 +354,7 @@ enum spider_net_int2_status {
 #define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */
 #define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */
 #define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
+#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
 
 struct spider_net_descr {
 	/* as defined by the hardware */
@@ -433,6 +439,7 @@ struct spider_net_card {
 	struct spider_net_descr_chain tx_chain;
 	struct spider_net_descr_chain rx_chain;
+	struct spider_net_descr *low_watermark;
 
 	struct net_device_stats netdev_stats;
@@ -448,8 +455,8 @@ struct spider_net_card {
 	/* for ethtool */
 	int msg_enable;
-	int rx_desc;
-	int tx_desc;
+	int num_rx_desc;
+	int num_tx_desc;
 	struct spider_net_extra_stats spider_stats;
 
 	struct spider_net_descr descr[0];
...
@@ -76,7 +76,7 @@ spider_net_ethtool_get_drvinfo(struct net_device *netdev,
 	/* clear and fill out info */
 	memset(drvinfo, 0, sizeof(struct ethtool_drvinfo));
 	strncpy(drvinfo->driver, spider_net_driver_name, 32);
-	strncpy(drvinfo->version, "0.1", 32);
+	strncpy(drvinfo->version, VERSION, 32);
 	strcpy(drvinfo->fw_version, "no information");
 	strncpy(drvinfo->bus_info, pci_name(card->pdev), 32);
 }
@@ -158,9 +158,9 @@ spider_net_ethtool_get_ringparam(struct net_device *netdev,
 	struct spider_net_card *card = netdev->priv;
 
 	ering->tx_max_pending = SPIDER_NET_TX_DESCRIPTORS_MAX;
-	ering->tx_pending = card->tx_desc;
+	ering->tx_pending = card->num_tx_desc;
 	ering->rx_max_pending = SPIDER_NET_RX_DESCRIPTORS_MAX;
-	ering->rx_pending = card->rx_desc;
+	ering->rx_pending = card->num_rx_desc;
 }
 
 static int spider_net_get_stats_count(struct net_device *netdev)
...
@@ -1730,7 +1730,7 @@ static void __init de21040_get_media_info(struct de_private *de)
 }
 
 /* Note: this routine returns extra data bits for size detection. */
-static unsigned __init tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
+static unsigned __devinit tulip_read_eeprom(void __iomem *regs, int location, int addr_len)
 {
 	int i;
 	unsigned retval = 0;
@@ -1926,7 +1926,7 @@ static void __init de21041_get_srom_info (struct de_private *de)
 	goto fill_defaults;
 }
 
-static int __init de_init_one (struct pci_dev *pdev,
+static int __devinit de_init_one (struct pci_dev *pdev,
 			    const struct pci_device_id *ent)
 {
 	struct net_device *dev;
@@ -2082,7 +2082,7 @@ static int __init de_init_one (struct pci_dev *pdev,
 	return rc;
 }
 
-static void __exit de_remove_one (struct pci_dev *pdev)
+static void __devexit de_remove_one (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct de_private *de = dev->priv;
@@ -2164,7 +2164,7 @@ static struct pci_driver de_driver = {
 	.name		= DRV_NAME,
 	.id_table	= de_pci_tbl,
 	.probe		= de_init_one,
-	.remove		= __exit_p(de_remove_one),
+	.remove		= __devexit_p(de_remove_one),
 #ifdef CONFIG_PM
 	.suspend	= de_suspend,
 	.resume		= de_resume,
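These hunks are the section-mismatch fix: the probe-path functions were marked __init/__exit, and those sections may be discarded after boot, yet PCI probe and remove can run later (for example on hotplug), so they need the __devinit/__devexit family instead. A minimal 2.6-era skeleton of the corrected pattern; the driver name and PCI ID below are placeholders:

    /* Probe/remove run at device (hot)plug time, not only at module
     * init, so they must not live in the discardable init sections. */
    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/pci.h>

    static int __devinit foo_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
    {
        return pci_enable_device(pdev);
    }

    static void __devexit foo_remove(struct pci_dev *pdev)
    {
        pci_disable_device(pdev);
    }

    static struct pci_device_id foo_pci_tbl[] = {
        { PCI_DEVICE(0x1011, 0x0009) }, /* placeholder vendor/device ID */
        { }
    };

    static struct pci_driver foo_driver = {
        .name     = "foo",
        .id_table = foo_pci_tbl,
        .probe    = foo_probe,
        /* __devexit_p() compiles to NULL when hotplug is configured out */
        .remove   = __devexit_p(foo_remove),
    };

    static int __init foo_init(void)
    {
        return pci_register_driver(&foo_driver);
    }

    static void __exit foo_exit(void)
    {
        pci_unregister_driver(&foo_driver);
    }

    module_init(foo_init);
    module_exit(foo_exit);
    MODULE_LICENSE("GPL");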
...