Commit 06c87850 authored by Don Fry, committed by Jeff Garzik

[PATCH] pcnet32: Handle memory allocation failures cleanly when resizing tx/rx rings

Fix pcnet32_set_ringparam to handle memory allocation errors without
leaving the adapter in an inoperative state or null pointers waiting to
be dereferenced.

Tested on ia32 and ppc64.
Signed-off-by: Don Fry <brazilnut@us.ibm.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Parent 12fa30f3
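The fix follows the allocate-then-commit pattern: acquire every new resource
first, and only free and swap out the old ones once nothing more can fail, so
an allocation error leaves the device running on its original rings. A minimal
generic sketch of the pattern in C (names are illustrative, not the driver's):

#include <stdlib.h>
#include <string.h>

struct ring {
	void **slots;
	unsigned int size;
};

/* Returns 0 on success; on failure the old ring is left untouched. */
static int ring_resize(struct ring *r, unsigned int new_size)
{
	void **new_slots = calloc(new_size, sizeof(*new_slots));

	if (!new_slots)
		return -1;	/* keep old resources */

	/* copy over whatever still fits */
	memcpy(new_slots, r->slots,
	       (new_size < r->size ? new_size : r->size) * sizeof(*new_slots));

	/* commit point: only now is the old ring released */
	free(r->slots);
	r->slots = new_slots;
	r->size = new_size;
	return 0;
}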
@@ -185,6 +185,23 @@ static int homepna[MAX_UNITS];
#define PCNET32_TOTAL_SIZE 0x20
#define CSR0 0
#define CSR0_INIT 0x1
#define CSR0_START 0x2
#define CSR0_STOP 0x4
#define CSR0_TXPOLL 0x8
#define CSR0_INTEN 0x40
#define CSR0_IDON 0x0100
#define CSR0_NORMAL (CSR0_START | CSR0_INTEN)
#define PCNET32_INIT_LOW 1
#define PCNET32_INIT_HIGH 2
#define CSR3 3
#define CSR4 4
#define CSR5 5
#define CSR5_SUSPEND 0x0001
#define CSR15 15
#define PCNET32_MC_FILTER 8
/* The PCNET32 Rx and Tx ring descriptors. */
struct pcnet32_rx_head {
	u32 base;
@@ -415,6 +432,219 @@ static struct pcnet32_access pcnet32_dwio = {
	.reset = pcnet32_dwio_reset
};
static void pcnet32_netif_stop(struct net_device *dev)
{
	dev->trans_start = jiffies;	/* prevent tx timeout */
netif_poll_disable(dev);
netif_tx_disable(dev);
}
static void pcnet32_netif_start(struct net_device *dev)
{
netif_wake_queue(dev);
netif_poll_enable(dev);
}
/*
 * Allocate space for the new sized tx ring.
 * Free old resources.
 * Save new resources.
 * Any failure keeps old resources.
 * Must be called with lp->lock held.
 */
static void pcnet32_realloc_tx_ring(struct net_device *dev,
struct pcnet32_private *lp,
unsigned int size)
{
dma_addr_t new_ring_dma_addr;
dma_addr_t *new_dma_addr_list;
struct pcnet32_tx_head *new_tx_ring;
struct sk_buff **new_skb_list;
pcnet32_purge_tx_ring(dev);
new_tx_ring = pci_alloc_consistent(lp->pci_dev,
sizeof(struct pcnet32_tx_head) *
(1 << size),
&new_ring_dma_addr);
if (new_tx_ring == NULL) {
if (netif_msg_drv(lp))
printk("\n" KERN_ERR
"%s: Consistent memory allocation failed.\n",
dev->name);
return;
}
memset(new_tx_ring, 0, sizeof(struct pcnet32_tx_head) * (1 << size));
new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
GFP_ATOMIC);
if (!new_dma_addr_list) {
if (netif_msg_drv(lp))
printk("\n" KERN_ERR
"%s: Memory allocation failed.\n", dev->name);
goto free_new_tx_ring;
}
new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!new_skb_list) {
if (netif_msg_drv(lp))
printk("\n" KERN_ERR
"%s: Memory allocation failed.\n", dev->name);
goto free_new_lists;
}
kfree(lp->tx_skbuff);
kfree(lp->tx_dma_addr);
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_tx_head) *
lp->tx_ring_size, lp->tx_ring,
lp->tx_ring_dma_addr);
lp->tx_ring_size = (1 << size);
lp->tx_mod_mask = lp->tx_ring_size - 1;
lp->tx_len_bits = (size << 12);
lp->tx_ring = new_tx_ring;
lp->tx_ring_dma_addr = new_ring_dma_addr;
lp->tx_dma_addr = new_dma_addr_list;
lp->tx_skbuff = new_skb_list;
return;
free_new_lists:
kfree(new_dma_addr_list);
free_new_tx_ring:
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_tx_head) *
(1 << size),
new_tx_ring,
new_ring_dma_addr);
return;
}
/*
 * Allocate space for the new sized rx ring.
 * Re-use old receive buffers.
 *   alloc extra buffers
 *   free unneeded buffers
 * Save new resources.
 * Any failure keeps old resources.
 * Must be called with lp->lock held.
 */
static void pcnet32_realloc_rx_ring(struct net_device *dev,
struct pcnet32_private *lp,
unsigned int size)
{
dma_addr_t new_ring_dma_addr;
dma_addr_t *new_dma_addr_list;
struct pcnet32_rx_head *new_rx_ring;
struct sk_buff **new_skb_list;
int new, overlap;
new_rx_ring = pci_alloc_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
(1 << size),
&new_ring_dma_addr);
if (new_rx_ring == NULL) {
if (netif_msg_drv(lp))
printk("\n" KERN_ERR
"%s: Consistent memory allocation failed.\n",
dev->name);
return;
}
memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
new_dma_addr_list = kcalloc((1 << size), sizeof(dma_addr_t),
GFP_ATOMIC);
if (!new_dma_addr_list) {
if (netif_msg_drv(lp))
printk("\n" KERN_ERR
"%s: Memory allocation failed.\n", dev->name);
goto free_new_rx_ring;
}
new_skb_list = kcalloc((1 << size), sizeof(struct sk_buff *),
GFP_ATOMIC);
if (!new_skb_list) {
if (netif_msg_drv(lp))
printk("\n" KERN_ERR
"%s: Memory allocation failed.\n", dev->name);
goto free_new_lists;
}
	/* first copy the current receive buffers; "size" is the log2 of the
	 * new entry count, so compare entry counts rather than the exponent */
	overlap = min(1U << size, lp->rx_ring_size);
for (new = 0; new < overlap; new++) {
new_rx_ring[new] = lp->rx_ring[new];
new_dma_addr_list[new] = lp->rx_dma_addr[new];
new_skb_list[new] = lp->rx_skbuff[new];
}
	/* now allocate any new buffers needed */
	for (; new < (1 << size); new++) {
struct sk_buff *rx_skbuff;
new_skb_list[new] = dev_alloc_skb(PKT_BUF_SZ);
if (!(rx_skbuff = new_skb_list[new])) {
/* keep the original lists and buffers */
if (netif_msg_drv(lp))
printk(KERN_ERR
"%s: pcnet32_realloc_rx_ring dev_alloc_skb failed.\n",
dev->name);
goto free_all_new;
}
skb_reserve(rx_skbuff, 2);
new_dma_addr_list[new] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
new_rx_ring[new].base = (u32) le32_to_cpu(new_dma_addr_list[new]);
new_rx_ring[new].buf_length = le16_to_cpu(2 - PKT_BUF_SZ);
new_rx_ring[new].status = le16_to_cpu(0x8000);
}
/* and free any unneeded buffers */
for (; new < lp->rx_ring_size; new++) {
if (lp->rx_skbuff[new]) {
pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_skbuff[new]);
}
}
kfree(lp->rx_skbuff);
kfree(lp->rx_dma_addr);
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
lp->rx_ring_size, lp->rx_ring,
lp->rx_ring_dma_addr);
lp->rx_ring_size = (1 << size);
lp->rx_mod_mask = lp->rx_ring_size - 1;
lp->rx_len_bits = (size << 4);
lp->rx_ring = new_rx_ring;
lp->rx_ring_dma_addr = new_ring_dma_addr;
lp->rx_dma_addr = new_dma_addr_list;
lp->rx_skbuff = new_skb_list;
return;
free_all_new:
for (; --new >= lp->rx_ring_size; ) {
if (new_skb_list[new]) {
pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
PKT_BUF_SZ - 2, PCI_DMA_FROMDEVICE);
dev_kfree_skb(new_skb_list[new]);
}
}
kfree(new_skb_list);
free_new_lists:
kfree(new_dma_addr_list);
free_new_rx_ring:
pci_free_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
(1 << size),
new_rx_ring,
new_ring_dma_addr);
return;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void pcnet32_poll_controller(struct net_device *dev)
{
@@ -526,56 +756,53 @@ static int pcnet32_set_ringparam(struct net_device *dev,
 {
 	struct pcnet32_private *lp = dev->priv;
 	unsigned long flags;
+	unsigned int size;
+	ulong ioaddr = dev->base_addr;
 	int i;
 
 	if (ering->rx_mini_pending || ering->rx_jumbo_pending)
 		return -EINVAL;
 
 	if (netif_running(dev))
-		pcnet32_close(dev);
+		pcnet32_netif_stop(dev);
 
 	spin_lock_irqsave(&lp->lock, flags);
-	pcnet32_free_ring(dev);
-	lp->tx_ring_size =
-	    min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
-	lp->rx_ring_size =
-	    min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
+	lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);	/* stop the chip */
+
+	size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
 
 	/* set the minimum ring size to 4, to allow the loopback test to work
 	 * unchanged.
 	 */
 	for (i = 2; i <= PCNET32_LOG_MAX_TX_BUFFERS; i++) {
-		if (lp->tx_ring_size <= (1 << i))
+		if (size <= (1 << i))
 			break;
 	}
-	lp->tx_ring_size = (1 << i);
-	lp->tx_mod_mask = lp->tx_ring_size - 1;
-	lp->tx_len_bits = (i << 12);
+	if ((1 << i) != lp->tx_ring_size)
+		pcnet32_realloc_tx_ring(dev, lp, i);
+
+	size = min(ering->rx_pending, (unsigned int)RX_MAX_RING_SIZE);
 	for (i = 2; i <= PCNET32_LOG_MAX_RX_BUFFERS; i++) {
-		if (lp->rx_ring_size <= (1 << i))
+		if (size <= (1 << i))
 			break;
 	}
-	lp->rx_ring_size = (1 << i);
-	lp->rx_mod_mask = lp->rx_ring_size - 1;
-	lp->rx_len_bits = (i << 4);
+	if ((1 << i) != lp->rx_ring_size)
+		pcnet32_realloc_rx_ring(dev, lp, i);
+
+	dev->weight = lp->rx_ring_size / 2;
 
-	if (pcnet32_alloc_ring(dev, dev->name)) {
-		pcnet32_free_ring(dev);
-		spin_unlock_irqrestore(&lp->lock, flags);
-		return -ENOMEM;
+	if (netif_running(dev)) {
+		pcnet32_netif_start(dev);
+		pcnet32_restart(dev, CSR0_NORMAL);
 	}
 
 	spin_unlock_irqrestore(&lp->lock, flags);
 
-	if (pcnet32_debug & NETIF_MSG_DRV)
-		printk(KERN_INFO PFX
+	if (netif_msg_drv(lp))
+		printk(KERN_INFO
 		       "%s: Ring Param Settings: RX: %d, TX: %d\n", dev->name,
 		       lp->rx_ring_size, lp->tx_ring_size);
 
-	if (netif_running(dev))
-		pcnet32_open(dev);
-
 	return 0;
 }