Commit 88426f2a authored by Denis Kirjanov, committed by David S. Miller

ibmveth: Cleanup error handling inside ibmveth_open

Remove duplicated code in one place.
Signed-off-by: Denis Kirjanov <dkirjanov@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Parent 27b75c95
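The patch below converts every failure path in ibmveth_open() to the kernel's common centralized-exit idiom: each path records an error code in rc and jumps to a single err_out label, so the unwind calls (ibmveth_cleanup() and napi_disable()) appear exactly once instead of being repeated before every early return. A minimal standalone sketch of that idiom follows; open_device() and the two malloc'd resources are hypothetical stand-ins, not driver code.

/*
 * Sketch of the goto-based centralized error handling this commit
 * applies. Names here (open_device, a, b) are illustrative only.
 */
#include <stdlib.h>

static int open_device(void)
{
	void *a = NULL, *b = NULL;
	int rc;

	a = malloc(64);
	if (!a) {
		rc = -1;	/* before the patch: cleanup + return duplicated here */
		goto err_out;
	}

	b = malloc(64);
	if (!b) {
		rc = -1;	/* every failure path shares the same exit */
		goto err_out;
	}

	return 0;	/* success path skips the label entirely */

err_out:
	/*
	 * The one and only copy of the unwind code, mirroring the
	 * ibmveth_cleanup() + napi_disable() pair in the patch.
	 * free(NULL) is a safe no-op, so partially-initialized
	 * state unwinds correctly from any failure point.
	 */
	free(b);
	free(a);
	return rc;
}

int main(void)
{
	return open_device() ? EXIT_FAILURE : EXIT_SUCCESS;
}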
@@ -546,9 +546,8 @@ static int ibmveth_open(struct net_device *netdev)
 	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
 		netdev_err(netdev, "unable to allocate filter or buffer list "
 			   "pages\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
@@ -558,9 +557,8 @@ static int ibmveth_open(struct net_device *netdev)
 
 	if (!adapter->rx_queue.queue_addr) {
 		netdev_err(netdev, "unable to allocate rx queue pages\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	dev = &adapter->vdev->dev;
@@ -578,9 +576,8 @@ static int ibmveth_open(struct net_device *netdev)
 	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
 		netdev_err(netdev, "unable to map filter or buffer list "
 			   "pages\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	adapter->rx_queue.index = 0;
@@ -611,9 +608,8 @@ static int ibmveth_open(struct net_device *netdev)
 				     adapter->filter_list_dma,
 				     rxq_desc.desc,
 				     mac_address);
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENONET;
+		rc = -ENONET;
+		goto err_out;
 	}
 
 	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
@@ -622,9 +618,8 @@ static int ibmveth_open(struct net_device *netdev)
 		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
 			netdev_err(netdev, "unable to alloc pool\n");
 			adapter->rx_buff_pool[i].active = 0;
-			ibmveth_cleanup(adapter);
-			napi_disable(&adapter->napi);
-			return -ENOMEM ;
+			rc = -ENOMEM;
+			goto err_out;
 		}
 	}
 
@@ -638,27 +633,23 @@ static int ibmveth_open(struct net_device *netdev)
 			rc = h_free_logical_lan(adapter->vdev->unit_address);
 		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
 
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return rc;
+		goto err_out;
 	}
 
 	adapter->bounce_buffer =
 	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
 	if (!adapter->bounce_buffer) {
 		netdev_err(netdev, "unable to allocate bounce buffer\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 	adapter->bounce_buffer_dma =
 	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
 			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
 	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
 		netdev_err(netdev, "unable to map bounce buffer\n");
-		ibmveth_cleanup(adapter);
-		napi_disable(&adapter->napi);
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto err_out;
 	}
 
 	netdev_dbg(netdev, "initial replenish cycle\n");
@@ -669,6 +660,11 @@ static int ibmveth_open(struct net_device *netdev)
 	netdev_dbg(netdev, "open complete\n");
 
 	return 0;
+
+err_out:
+	ibmveth_cleanup(adapter);
+	napi_disable(&adapter->napi);
+	return rc;
 }
 
 static int ibmveth_close(struct net_device *netdev)