author    Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 21:40:54 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-10-08 21:40:54 -0400
commit    35a9ad8af0bb0fa3525e6d0d20e32551d226f38e (patch)
tree      15b4b33206818886d9cff371fd2163e073b70568 /drivers/net/wireless/ath/ath10k/ce.c
parent    Merge tag 'arm64-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc (diff)
parent    Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Most notable changes in here:

  1) By far the biggest accomplishment, thanks to a large range of
     contributors, is the addition of multi-send for transmit. This is
     the result of discussions back in Chicago, and the hard work of
     several individuals.

     Now, when the ->ndo_start_xmit() method of a driver sees
     skb->xmit_more as true, it can choose to defer the doorbell
     telling the driver to start processing the new TX queue entries.

     skb->xmit_more means that the generic networking is guaranteed to
     call the driver immediately with another SKB to send.

     There is logic added to the qdisc layer to dequeue multiple
     packets at a time, and the handling of mis-predicted offloads in
     software is now done with no locks held.

     Finally, pktgen is extended to have a "burst" parameter that can
     be used to test a multi-send implementation.

     Several drivers have xmit_more support: i40e, igb, ixgbe, mlx4,
     virtio_net. Adding support is almost trivial, so expect more
     drivers to support this optimization soon (see the sketch
     following the shortlog below).

     I want to thank, in no particular or implied order, Jesper
     Dangaard Brouer, Eric Dumazet, Alexander Duyck, Tom Herbert,
     Jamal Hadi Salim, John Fastabend, Florian Westphal, Daniel
     Borkmann, David Tat, Hannes Frederic Sowa, and Rusty Russell.

  2) PTP and timestamping support in bnx2x, from Michal Kalderon.

  3) Allow adjusting the rx_copybreak threshold for a driver via
     ethtool, and add rx_copybreak support to the enic driver. From
     Govindarajulu Varadarajan.

  4) Significant enhancements to the generic PHY layer and the bcm7xxx
     driver in particular (EEE support, auto power down, etc.) from
     Florian Fainelli.

  5) Allow raw buffers to be used for flow dissection, allowing
     drivers to determine the optimal "linear pull" size for devices
     that DMA into pools of pages. The objective is to get exactly the
     necessary amount of headers into the linear SKB area pre-pulled,
     but no more. The new interface drivers use is eth_get_headlen().
     From WANG Cong, with driver conversions (several had their own
     by-hand duplicated implementations) by Alexander Duyck and Eric
     Dumazet.

  6) Support checksumming more smoothly and efficiently for
     encapsulations, and add the "foo over UDP" facility. From Tom
     Herbert.

  7) Add the Broadcom SF2 switch driver to the DSA layer, from Florian
     Fainelli.

  8) eBPF can now load programs via a system call and has an extensive
     test suite. From Alexei Starovoitov and Daniel Borkmann.

  9) Major overhaul of the packet scheduler to use RCU in several
     major areas such as the classifiers and rate estimators. From
     John Fastabend.

 10) Add a driver for the Intel FM10000 Ethernet Switch, from
     Alexander Duyck.

 11) Rearrange TCP_SKB_CB() to reduce cache line misses, from Eric
     Dumazet.

 12) Add Datacenter TCP congestion control algorithm support, from
     Florian Westphal.

 13) Reorganize sk_buff so that __copy_skb_header() is significantly
     faster. From Eric Dumazet"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1558 commits)
  netlabel: directly return netlbl_unlabel_genl_init()
  net: add netdev_txq_bql_{enqueue, complete}_prefetchw() helpers
  net: description of dma_cookie cause make xmldocs warning
  cxgb4: clean up a type issue
  cxgb4: potential shift wrapping bug
  i40e: skb->xmit_more support
  net: fs_enet: Add NAPI TX
  net: fs_enet: Remove non NAPI RX
  r8169: add support for RTL8168EP
  net_sched: copy exts->type in tcf_exts_change()
  wimax: convert printk to pr_foo()
  af_unix: remove 0 assignment on static
  ipv6: Do not warn for informational ICMP messages, regardless of type.
  Update Intel Ethernet Driver maintainers list
  bridge: Save frag_max_size between PRE_ROUTING and POST_ROUTING
  tipc: fix bug in multicast congestion handling
  net: better IFF_XMIT_DST_RELEASE support
  net/mlx4_en: remove NETDEV_TX_BUSY
  3c59x: fix bad split of cpu_to_le32(pci_map_single())
  net: bcmgenet: fix Tx ring priority programming
  ...
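As a rough illustration of the xmit_more pattern from item 1: the driver posts the
frame to its TX ring as usual, but skips the (typically expensive MMIO) doorbell
write while the stack promises another skb is coming. This is only a sketch;
struct my_dev, my_post_descriptor() and my_ring_doorbell() are hypothetical
stand-ins, not taken from any of the converted drivers. Only skb->xmit_more,
netdev_tx_t and the netif_xmit_stopped() helper are real kernel API.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct my_dev { int dummy; };				/* hypothetical private state */

static void my_post_descriptor(struct my_dev *p, struct sk_buff *skb) { }
static void my_ring_doorbell(struct my_dev *p) { }	/* expensive MMIO write */

static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct my_dev *priv = netdev_priv(dev);

	/* Queue the frame in the hardware TX ring as usual. */
	my_post_descriptor(priv, skb);

	/*
	 * skb->xmit_more set means the stack guarantees another skb
	 * immediately follows, so the doorbell can be deferred and
	 * amortized over the whole burst. Drivers must still ring when
	 * the queue is stopping, since no later skb would ever flush
	 * the deferred doorbell.
	 */
	if (!skb->xmit_more ||
	    netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
		my_ring_doorbell(priv);

	return NETDEV_TX_OK;
}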
Diffstat (limited to 'drivers/net/wireless/ath/ath10k/ce.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c  185
1 file changed, 65 insertions(+), 120 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 4333107ecf37..101cadb6e4ba 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -260,7 +260,6 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
ath10k_pci_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}
-
/*
* Guts of ath10k_ce_send, used by both ath10k_ce_send and
* ath10k_ce_sendlist_send.
@@ -284,13 +283,9 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
int ret = 0;
if (nbytes > ce_state->src_sz_max)
- ath10k_warn("%s: send more we can (nbytes: %d, max: %d)\n",
+ ath10k_warn(ar, "%s: send more we can (nbytes: %d, max: %d)\n",
__func__, nbytes, ce_state->src_sz_max);
- ret = ath10k_pci_wake(ar);
- if (ret)
- return ret;
-
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
ret = -ENOSR;
@@ -325,7 +320,6 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
src_ring->write_index = write_index;
exit:
- ath10k_pci_sleep(ar);
return ret;
}
@@ -390,49 +384,56 @@ int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
return delta;
}
-int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
- void *per_recv_context,
- u32 buffer)
+int __ath10k_ce_rx_num_free_bufs(struct ath10k_ce_pipe *pipe)
{
- struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
- u32 ctrl_addr = ce_state->ctrl_addr;
- struct ath10k *ar = ce_state->ar;
+ struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
- unsigned int write_index;
- unsigned int sw_index;
- int ret;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
- spin_lock_bh(&ar_pci->ce_lock);
- write_index = dest_ring->write_index;
- sw_index = dest_ring->sw_index;
+ lockdep_assert_held(&ar_pci->ce_lock);
- ret = ath10k_pci_wake(ar);
- if (ret)
- goto out;
+ return CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
+}
- if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) > 0) {
- struct ce_desc *base = dest_ring->base_addr_owner_space;
- struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+int __ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ struct ath10k_ce_ring *dest_ring = pipe->dest_ring;
+ unsigned int nentries_mask = dest_ring->nentries_mask;
+ unsigned int write_index = dest_ring->write_index;
+ unsigned int sw_index = dest_ring->sw_index;
+ struct ce_desc *base = dest_ring->base_addr_owner_space;
+ struct ce_desc *desc = CE_DEST_RING_TO_DESC(base, write_index);
+ u32 ctrl_addr = pipe->ctrl_addr;
- /* Update destination descriptor */
- desc->addr = __cpu_to_le32(buffer);
- desc->nbytes = 0;
+ lockdep_assert_held(&ar_pci->ce_lock);
- dest_ring->per_transfer_context[write_index] =
- per_recv_context;
+ if (CE_RING_DELTA(nentries_mask, write_index, sw_index - 1) == 0)
+ return -EIO;
- /* Update Destination Ring Write Index */
- write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
- ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
- dest_ring->write_index = write_index;
- ret = 0;
- } else {
- ret = -EIO;
- }
- ath10k_pci_sleep(ar);
+ desc->addr = __cpu_to_le32(paddr);
+ desc->nbytes = 0;
+
+ dest_ring->per_transfer_context[write_index] = ctx;
+ write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
+ ath10k_ce_dest_ring_write_index_set(ar, ctrl_addr, write_index);
+ dest_ring->write_index = write_index;
+
+ return 0;
+}
+
+int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
+{
+ struct ath10k *ar = pipe->ar;
+ struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ret;
-out:
+ spin_lock_bh(&ar_pci->ce_lock);
+ ret = __ath10k_ce_rx_post_buf(pipe, ctx, paddr);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
@@ -588,7 +589,6 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
unsigned int sw_index = src_ring->sw_index;
struct ce_desc *sdesc, *sbase;
unsigned int read_index;
- int ret;
if (src_ring->hw_index == sw_index) {
/*
@@ -599,18 +599,12 @@ static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
* value of the HW index has become stale.
*/
- ret = ath10k_pci_wake(ar);
- if (ret)
- return ret;
-
read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
if (read_index == 0xffffffff)
return -ENODEV;
read_index &= nentries_mask;
src_ring->hw_index = read_index;
-
- ath10k_pci_sleep(ar);
}
read_index = src_ring->hw_index;
@@ -731,11 +725,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
u32 ctrl_addr = ce_state->ctrl_addr;
- int ret;
-
- ret = ath10k_pci_wake(ar);
- if (ret)
- return;
spin_lock_bh(&ar_pci->ce_lock);
@@ -760,7 +749,6 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
ath10k_ce_engine_int_status_clear(ar, ctrl_addr, CE_WATERMARK_MASK);
spin_unlock_bh(&ar_pci->ce_lock);
- ath10k_pci_sleep(ar);
}
/*
@@ -771,13 +759,9 @@ void ath10k_ce_per_engine_service(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_per_engine_service_any(struct ath10k *ar)
{
- int ce_id, ret;
+ int ce_id;
u32 intr_summary;
- ret = ath10k_pci_wake(ar);
- if (ret)
- return;
-
intr_summary = CE_INTERRUPT_SUMMARY(ar);
for (ce_id = 0; intr_summary && (ce_id < CE_COUNT); ce_id++) {
@@ -789,8 +773,6 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
ath10k_ce_per_engine_service(ar, ce_id);
}
-
- ath10k_pci_sleep(ar);
}
/*
@@ -800,16 +782,11 @@ void ath10k_ce_per_engine_service_any(struct ath10k *ar)
*
* Called with ce_lock held.
*/
-static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
- int disable_copy_compl_intr)
+static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state)
{
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
- int ret;
-
- ret = ath10k_pci_wake(ar);
- if (ret)
- return;
+ bool disable_copy_compl_intr = ce_state->attr_flags & CE_ATTR_DIS_INTR;
if ((!disable_copy_compl_intr) &&
(ce_state->send_cb || ce_state->recv_cb))
@@ -818,17 +795,11 @@ static void ath10k_ce_per_engine_handler_adjust(struct ath10k_ce_pipe *ce_state,
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
-
- ath10k_pci_sleep(ar);
}
int ath10k_ce_disable_interrupts(struct ath10k *ar)
{
- int ce_id, ret;
-
- ret = ath10k_pci_wake(ar);
- if (ret)
- return ret;
+ int ce_id;
for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
u32 ctrl_addr = ath10k_ce_base_address(ce_id);
@@ -838,34 +809,16 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
ath10k_ce_watermark_intr_disable(ar, ctrl_addr);
}
- ath10k_pci_sleep(ar);
-
return 0;
}
-void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
- void (*send_cb)(struct ath10k_ce_pipe *),
- int disable_interrupts)
+void ath10k_ce_enable_interrupts(struct ath10k *ar)
{
- struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
+ int ce_id;
- spin_lock_bh(&ar_pci->ce_lock);
- ce_state->send_cb = send_cb;
- ath10k_ce_per_engine_handler_adjust(ce_state, disable_interrupts);
- spin_unlock_bh(&ar_pci->ce_lock);
-}
-
-void ath10k_ce_recv_cb_register(struct ath10k_ce_pipe *ce_state,
- void (*recv_cb)(struct ath10k_ce_pipe *))
-{
- struct ath10k *ar = ce_state->ar;
- struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
-
- spin_lock_bh(&ar_pci->ce_lock);
- ce_state->recv_cb = recv_cb;
- ath10k_ce_per_engine_handler_adjust(ce_state, 0);
- spin_unlock_bh(&ar_pci->ce_lock);
+ for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
+ ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}
static int ath10k_ce_init_src_ring(struct ath10k *ar,
@@ -898,7 +851,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot init ce src ring id %d entries %d base_addr %p\n",
ce_id, nentries, src_ring->base_addr_owner_space);
@@ -932,7 +885,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_lowmark_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, nentries);
- ath10k_dbg(ATH10K_DBG_BOOT,
+ ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot ce dest ring id %d entries %d base_addr %p\n",
ce_id, nentries, dest_ring->base_addr_owner_space);
@@ -1067,7 +1020,9 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
* initialized by software/firmware.
*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
- const struct ce_attr *attr)
+ const struct ce_attr *attr,
+ void (*send_cb)(struct ath10k_ce_pipe *),
+ void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
@@ -1084,39 +1039,37 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
- ret = ath10k_pci_wake(ar);
- if (ret)
- return ret;
-
spin_lock_bh(&ar_pci->ce_lock);
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
+ if (attr->src_nentries)
+ ce_state->send_cb = send_cb;
+ if (attr->dest_nentries)
+ ce_state->recv_cb = recv_cb;
spin_unlock_bh(&ar_pci->ce_lock);
if (attr->src_nentries) {
ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
- ath10k_err("Failed to initialize CE src ring for ID: %d (%d)\n",
+ ath10k_err(ar, "Failed to initialize CE src ring for ID: %d (%d)\n",
ce_id, ret);
- goto out;
+ return ret;
}
}
if (attr->dest_nentries) {
ret = ath10k_ce_init_dest_ring(ar, ce_id, attr);
if (ret) {
- ath10k_err("Failed to initialize CE dest ring for ID: %d (%d)\n",
+ ath10k_err(ar, "Failed to initialize CE dest ring for ID: %d (%d)\n",
ce_id, ret);
- goto out;
+ return ret;
}
}
-out:
- ath10k_pci_sleep(ar);
- return ret;
+ return 0;
}
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
@@ -1140,16 +1093,8 @@ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
{
- int ret;
-
- ret = ath10k_pci_wake(ar);
- if (ret)
- return;
-
ath10k_ce_deinit_src_ring(ar, ce_id);
ath10k_ce_deinit_dest_ring(ar, ce_id);
-
- ath10k_pci_sleep(ar);
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
@@ -1163,7 +1108,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
ret = PTR_ERR(ce_state->src_ring);
- ath10k_err("failed to allocate copy engine source ring %d: %d\n",
+ ath10k_err(ar, "failed to allocate copy engine source ring %d: %d\n",
ce_id, ret);
ce_state->src_ring = NULL;
return ret;
@@ -1175,7 +1120,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
attr);
if (IS_ERR(ce_state->dest_ring)) {
ret = PTR_ERR(ce_state->dest_ring);
- ath10k_err("failed to allocate copy engine destination ring %d: %d\n",
+ ath10k_err(ar, "failed to allocate copy engine destination ring %d: %d\n",
ce_id, ret);
ce_state->dest_ring = NULL;
return ret;
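The recurring shape of this refactor, e.g. __ath10k_ce_rx_post_buf() versus its
ath10k_ce_rx_post_buf() wrapper, is the kernel's usual split between a
lock-asserting core and a thin locked wrapper: hot paths that already hold
ce_lock can call the core repeatedly under one acquisition, while
lockdep_assert_held() documents and enforces the contract (the diff also lifts
the ath10k_pci_wake()/ath10k_pci_sleep() pairs out of these helpers entirely).
A minimal sketch of the idiom, with hypothetical names (my_pipe, my_pipe_post),
not the driver's actual structures:

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct my_pipe {
	spinlock_t lock;
	u32 write_index;		/* stand-in for the real ring state */
};

/*
 * Core operation: the caller must already hold pipe->lock. On
 * lockdep-enabled kernels, a caller that forgets the lock triggers a
 * loud warning instead of silently racing on the ring indices.
 */
static int __my_pipe_post(struct my_pipe *pipe, u32 paddr)
{
	lockdep_assert_held(&pipe->lock);
	pipe->write_index++;		/* real code writes a descriptor here */
	return 0;
}

/* Convenience wrapper for callers that do not yet hold the lock. */
static int my_pipe_post(struct my_pipe *pipe, u32 paddr)
{
	int ret;

	spin_lock_bh(&pipe->lock);
	ret = __my_pipe_post(pipe, paddr);
	spin_unlock_bh(&pipe->lock);

	return ret;
}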