Diffstat (limited to 'drivers/staging/qlge/qlge_main.c')
-rw-r--r--  drivers/staging/qlge/qlge_main.c  53
1 file changed, 28 insertions(+), 25 deletions(-)
diff --git a/drivers/staging/qlge/qlge_main.c b/drivers/staging/qlge/qlge_main.c
index 9873bb2a9ee4..1ead7793062a 100644
--- a/drivers/staging/qlge/qlge_main.c
+++ b/drivers/staging/qlge/qlge_main.c
@@ -1976,7 +1976,7 @@ static unsigned long qlge_process_mac_rx_intr(struct qlge_adapter *qdev,
vlan_id);
} else {
/* Non-TCP/UDP large frames that span multiple buffers
- * can be processed corrrectly by the split frame logic.
+ * can be processed correctly by the split frame logic.
*/
qlge_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
vlan_id);
@@ -2461,7 +2461,7 @@ static int qlge_tso(struct sk_buff *skb, struct qlge_ob_mac_tso_iocb_req *mac_io
mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
mac_iocb_ptr->frame_len = cpu_to_le32((u32)skb->len);
mac_iocb_ptr->total_hdrs_len =
- cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
+ cpu_to_le16(skb_tcp_all_headers(skb));
mac_iocb_ptr->net_trans_offset =
cpu_to_le16(skb_network_offset(skb) |
skb_transport_offset(skb)
@@ -2955,7 +2955,7 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
void __iomem *doorbell_area =
qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
int err = 0;
- u64 tmp;
+ u64 dma;
__le64 *base_indirect_ptr;
int page_entries;
@@ -3004,15 +3004,15 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
FLAGS_LI; /* Load irq delay values */
if (rx_ring->cq_id < qdev->rss_ring_count) {
cqicb->flags |= FLAGS_LL; /* Load lbq values */
- tmp = (u64)rx_ring->lbq.base_dma;
+ dma = (u64)rx_ring->lbq.base_dma;
base_indirect_ptr = rx_ring->lbq.base_indirect;
- page_entries = 0;
- do {
- *base_indirect_ptr = cpu_to_le64(tmp);
- tmp += DB_PAGE_SIZE;
- base_indirect_ptr++;
- page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+
+ for (page_entries = 0;
+ page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
+ page_entries++) {
+ base_indirect_ptr[page_entries] = cpu_to_le64(dma);
+ dma += DB_PAGE_SIZE;
+ }
cqicb->lbq_addr = cpu_to_le64(rx_ring->lbq.base_indirect_dma);
cqicb->lbq_buf_size =
cpu_to_le16(QLGE_FIT16(qdev->lbq_buf_size));
@@ -3021,15 +3021,15 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
rx_ring->lbq.next_to_clean = 0;
cqicb->flags |= FLAGS_LS; /* Load sbq values */
- tmp = (u64)rx_ring->sbq.base_dma;
+ dma = (u64)rx_ring->sbq.base_dma;
base_indirect_ptr = rx_ring->sbq.base_indirect;
- page_entries = 0;
- do {
- *base_indirect_ptr = cpu_to_le64(tmp);
- tmp += DB_PAGE_SIZE;
- base_indirect_ptr++;
- page_entries++;
- } while (page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN));
+
+ for (page_entries = 0;
+ page_entries < MAX_DB_PAGES_PER_BQ(QLGE_BQ_LEN);
+ page_entries++) {
+ base_indirect_ptr[page_entries] = cpu_to_le64(dma);
+ dma += DB_PAGE_SIZE;
+ }
cqicb->sbq_addr =
cpu_to_le64(rx_ring->sbq.base_indirect_dma);
cqicb->sbq_buf_size = cpu_to_le16(SMALL_BUFFER_SIZE);
@@ -3041,8 +3041,8 @@ static int qlge_start_rx_ring(struct qlge_adapter *qdev, struct rx_ring *rx_ring
/* Inbound completion handling rx_rings run in
* separate NAPI contexts.
*/
- netif_napi_add(qdev->ndev, &rx_ring->napi, qlge_napi_poll_msix,
- 64);
+ netif_napi_add(qdev->ndev, &rx_ring->napi,
+ qlge_napi_poll_msix);
cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
} else {
@@ -4605,14 +4605,12 @@ static int qlge_probe(struct pci_dev *pdev,
err = register_netdev(ndev);
if (err) {
dev_err(&pdev->dev, "net device registration failed.\n");
- qlge_release_all(pdev);
- pci_disable_device(pdev);
- goto netdev_free;
+ goto cleanup_pdev;
}
err = qlge_health_create_reporters(qdev);
if (err)
- goto netdev_free;
+ goto unregister_netdev;
/* Start up the timer to trigger EEH if
* the bus goes dead
@@ -4626,6 +4624,11 @@ static int qlge_probe(struct pci_dev *pdev,
devlink_register(devlink);
return 0;
+unregister_netdev:
+ unregister_netdev(ndev);
+cleanup_pdev:
+ qlge_release_all(pdev);
+ pci_disable_device(pdev);
netdev_free:
free_netdev(ndev);
devlink_free: