Diffstat (limited to 'drivers/net/wireless/ath/ath10k/ce.c')
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c | 183
1 file changed, 142 insertions(+), 41 deletions(-)
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 2a5668b4f6bc..24b983edb357 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1,19 +1,8 @@
+// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "hif.h"
@@ -228,11 +217,31 @@ ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
}
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
- u32 ce_ctrl_addr,
- unsigned int addr)
+ u32 ce_id,
+ u64 addr)
+{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ u32 addr_lo = lower_32_bits(addr);
+
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->sr_base_addr_lo, addr_lo);
+
+ if (ce_state->ops->ce_set_src_ring_base_addr_hi) {
+ ce_state->ops->ce_set_src_ring_base_addr_hi(ar, ce_ctrl_addr,
+ addr);
+ }
+}
+
+static void ath10k_ce_set_src_ring_base_addr_hi(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr)
{
+ u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
+
ath10k_ce_write32(ar, ce_ctrl_addr +
- ar->hw_ce_regs->sr_base_addr, addr);
+ ar->hw_ce_regs->sr_base_addr_hi, addr_hi);
}
static inline void ath10k_ce_src_ring_size_set(struct ath10k *ar,
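
The new source-ring helper takes the CE id instead of a raw control address, looks up the per-CE register base itself, and programs the ring base as a lo/hi pair, with the high word written only through an optional op for targets whose descriptors carry more than 32 address bits. Below is a minimal standalone sketch of that lo/hi split; reg_write(), the offsets and ADDR_HI_MASK are placeholders for illustration, not the driver's register map (the real offsets come from ar->hw_ce_regs).

/* Minimal sketch of splitting a 64-bit ring base address into the
 * lo/hi register writes done above.  reg_write(), the offsets and
 * ADDR_HI_MASK are placeholders, not the driver's register map.
 */
#include <stdint.h>
#include <stdio.h>

#define SR_BASE_ADDR_LO 0x00
#define SR_BASE_ADDR_HI 0x04
#define ADDR_HI_MASK    0x1f    /* assumed: only a few high address bits exist */

static void reg_write(uint32_t reg, uint32_t val)
{
    printf("reg 0x%02x <- 0x%08x\n", reg, val);
}

static void set_src_ring_base(uint64_t addr, int has_addr_hi)
{
    uint32_t lo = (uint32_t)addr;                        /* lower_32_bits() */
    uint32_t hi = (uint32_t)(addr >> 32) & ADDR_HI_MASK; /* upper_32_bits(), masked */

    reg_write(SR_BASE_ADDR_LO, lo);
    if (has_addr_hi)            /* mirrors the optional ops hook above */
        reg_write(SR_BASE_ADDR_HI, hi);
}

int main(void)
{
    set_src_ring_base(0x1234567890ULL, 1);
    return 0;
}
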
@@ -313,11 +322,36 @@ static inline u32 ath10k_ce_dest_ring_read_index_get(struct ath10k *ar,
}
static inline void ath10k_ce_dest_ring_base_addr_set(struct ath10k *ar,
- u32 ce_ctrl_addr,
- u32 addr)
+ u32 ce_id,
+ u64 addr)
{
+ struct ath10k_ce *ce = ath10k_ce_priv(ar);
+ struct ath10k_ce_pipe *ce_state = &ce->ce_states[ce_id];
+ u32 ce_ctrl_addr = ath10k_ce_base_address(ar, ce_id);
+ u32 addr_lo = lower_32_bits(addr);
+
ath10k_ce_write32(ar, ce_ctrl_addr +
- ar->hw_ce_regs->dr_base_addr, addr);
+ ar->hw_ce_regs->dr_base_addr_lo, addr_lo);
+
+ if (ce_state->ops->ce_set_dest_ring_base_addr_hi) {
+ ce_state->ops->ce_set_dest_ring_base_addr_hi(ar, ce_ctrl_addr,
+ addr);
+ }
+}
+
+static void ath10k_ce_set_dest_ring_base_addr_hi(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ u64 addr)
+{
+ u32 addr_hi = upper_32_bits(addr) & CE_DESC_ADDR_HI_MASK;
+ u32 reg_value;
+
+ reg_value = ath10k_ce_read32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr_hi);
+ reg_value &= ~CE_DESC_ADDR_HI_MASK;
+ reg_value |= addr_hi;
+ ath10k_ce_write32(ar, ce_ctrl_addr +
+ ar->hw_ce_regs->dr_base_addr_hi, reg_value);
}
static inline void ath10k_ce_dest_ring_size_set(struct ath10k *ar,
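
Unlike the source-ring helper, the destination-ring high-address write above is a read-modify-write: only the bits covered by CE_DESC_ADDR_HI_MASK are replaced and any other bits in the register are preserved. A standalone sketch of that pattern follows, using an in-memory variable in place of the real register and an assumed mask width.

/* Sketch of the read-modify-write used for dr_base_addr_hi: replace only
 * the masked address bits, keep the rest of the register intact.
 * The "register" is a plain variable here, not real hardware.
 */
#include <stdint.h>
#include <stdio.h>

#define ADDR_HI_MASK 0x1f                              /* assumed field width */

static uint32_t fake_dr_base_addr_hi = 0xabcd0000;     /* other fields already set */

static void set_dest_ring_base_hi(uint64_t addr)
{
    uint32_t hi = (uint32_t)(addr >> 32) & ADDR_HI_MASK;
    uint32_t reg = fake_dr_base_addr_hi;

    reg &= ~(uint32_t)ADDR_HI_MASK;  /* clear the old address bits */
    reg |= hi;                       /* insert the new ones */
    fake_dr_base_addr_hi = reg;
}

int main(void)
{
    set_dest_ring_base_hi(0x1234567890ULL);
    printf("dr_base_addr_hi = 0x%08x\n", fake_dr_base_addr_hi);
    return 0;
}
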
@@ -500,14 +534,8 @@ static int _ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
/* WORKAROUND */
- if (!(flags & CE_SEND_FLAG_GATHER)) {
- if (ar->hw_params.shadow_reg_support)
- ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
- write_index);
- else
- ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
- write_index);
- }
+ if (!(flags & CE_SEND_FLAG_GATHER))
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
src_ring->write_index = write_index;
exit:
@@ -563,7 +591,7 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
addr = (__le32 *)&sdesc.addr;
- flags |= upper_32_bits(buffer) & CE_DESC_FLAGS_GET_MASK;
+ flags |= upper_32_bits(buffer) & CE_DESC_ADDR_HI_MASK;
addr[0] = __cpu_to_le32(buffer);
addr[1] = __cpu_to_le32(flags);
if (flags & CE_SEND_FLAG_GATHER)
@@ -581,8 +609,14 @@ static int _ath10k_ce_send_nolock_64(struct ath10k_ce_pipe *ce_state,
/* Update Source Ring Write Index */
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
- if (!(flags & CE_SEND_FLAG_GATHER))
- ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+ if (!(flags & CE_SEND_FLAG_GATHER)) {
+ if (ar->hw_params.shadow_reg_support)
+ ath10k_ce_shadow_src_ring_write_index_set(ar, ce_state,
+ write_index);
+ else
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
+ write_index);
+ }
src_ring->write_index = write_index;
exit:
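
This hunk moves the shadow-register handling from the 32-bit send path into the 64-bit one: when the hardware supports shadow registers the new write index goes through the shadow copy, otherwise it is written to the CE register directly, and gather fragments skip the hardware update entirely. A simplified sketch of that decision is shown below, with the ring state, flag value and both update paths reduced to illustrative stand-ins.

/* Simplified sketch of the write-index update decision in the 64-bit
 * send path.  The ring/flags layout and helpers are stand-ins only.
 */
#include <stdbool.h>
#include <stdio.h>

#define SEND_FLAG_GATHER 0x1   /* assumed flag value, for illustration */

struct fake_ring {
    unsigned int nentries_mask;
    unsigned int write_index;
};

static void shadow_write_index_set(unsigned int idx)
{
    printf("shadow write index = %u\n", idx);
}

static void reg_write_index_set(unsigned int idx)
{
    printf("register write index = %u\n", idx);
}

static void send_complete(struct fake_ring *ring, unsigned int flags,
                          bool shadow_reg_support)
{
    unsigned int write_index =
        (ring->write_index + 1) & ring->nentries_mask;  /* CE_RING_IDX_INCR */

    if (!(flags & SEND_FLAG_GATHER)) {
        if (shadow_reg_support)
            shadow_write_index_set(write_index);
        else
            reg_write_index_set(write_index);
    }
    ring->write_index = write_index;
}

int main(void)
{
    struct fake_ring ring = { .nentries_mask = 511, .write_index = 510 };

    send_complete(&ring, 0, true);                  /* shadow path */
    send_complete(&ring, SEND_FLAG_GATHER, true);   /* gather: no HW update */
    return 0;
}
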
@@ -731,7 +765,7 @@ static int __ath10k_ce_rx_post_buf_64(struct ath10k_ce_pipe *pipe,
return -ENOSPC;
desc->addr = __cpu_to_le64(paddr);
- desc->addr &= __cpu_to_le64(CE_DESC_37BIT_ADDR_MASK);
+ desc->addr &= __cpu_to_le64(CE_DESC_ADDR_MASK);
desc->nbytes = 0;
@@ -1032,8 +1066,8 @@ EXPORT_SYMBOL(ath10k_ce_revoke_recv_next);
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
-int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
- void **per_transfer_contextp)
+static int _ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
@@ -1084,6 +1118,66 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
return 0;
}
+
+static int _ath10k_ce_completed_send_next_nolock_64(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ struct ath10k_ce_ring *src_ring = ce_state->src_ring;
+ u32 ctrl_addr = ce_state->ctrl_addr;
+ struct ath10k *ar = ce_state->ar;
+ unsigned int nentries_mask = src_ring->nentries_mask;
+ unsigned int sw_index = src_ring->sw_index;
+ unsigned int read_index;
+ struct ce_desc_64 *desc;
+
+ if (src_ring->hw_index == sw_index) {
+ /*
+ * The SW completion index has caught up with the cached
+ * version of the HW completion index.
+ * Update the cached HW completion index to see whether
+ * the SW has really caught up to the HW, or if the cached
+ * value of the HW index has become stale.
+ */
+
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ if (read_index == 0xffffffff)
+ return -ENODEV;
+
+ read_index &= nentries_mask;
+ src_ring->hw_index = read_index;
+ }
+
+ if (ar->hw_params.rri_on_ddr)
+ read_index = ath10k_ce_src_ring_read_index_get(ar, ctrl_addr);
+ else
+ read_index = src_ring->hw_index;
+
+ if (read_index == sw_index)
+ return -EIO;
+
+ if (per_transfer_contextp)
+ *per_transfer_contextp =
+ src_ring->per_transfer_context[sw_index];
+
+ /* sanity */
+ src_ring->per_transfer_context[sw_index] = NULL;
+ desc = CE_SRC_RING_TO_DESC_64(src_ring->base_addr_owner_space,
+ sw_index);
+ desc->nbytes = 0;
+
+ /* Update sw_index */
+ sw_index = CE_RING_IDX_INCR(nentries_mask, sw_index);
+ src_ring->sw_index = sw_index;
+
+ return 0;
+}
+
+int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
+ void **per_transfer_contextp)
+{
+ return ce_state->ops->ce_completed_send_next_nolock(ce_state,
+ per_transfer_contextp);
+}
EXPORT_SYMBOL(ath10k_ce_completed_send_next_nolock);
static void ath10k_ce_extract_desc_data(struct ath10k *ar,
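
With the 64-bit variant added, ath10k_ce_completed_send_next_nolock() becomes a thin wrapper that dispatches through the per-pipe ops table, so callers stay unchanged while the 32-bit and 64-bit descriptor formats get separate implementations. A minimal sketch of that dispatch pattern follows; the pipe and ops types and both implementations are made up for illustration.

/* Minimal sketch of the ops-table dispatch used for the completed-send
 * path.  The types and both implementations are illustrative only.
 */
#include <stdio.h>

struct fake_pipe;

struct fake_ce_ops {
    int (*completed_send_next_nolock)(struct fake_pipe *pipe, void **ctx);
};

struct fake_pipe {
    const struct fake_ce_ops *ops;
};

static int completed_send_next_nolock_32(struct fake_pipe *pipe, void **ctx)
{
    (void)pipe; (void)ctx;
    printf("32-bit descriptor completion\n");
    return 0;
}

static int completed_send_next_nolock_64(struct fake_pipe *pipe, void **ctx)
{
    (void)pipe; (void)ctx;
    printf("64-bit descriptor completion\n");
    return 0;
}

static const struct fake_ce_ops ops_32 = {
    .completed_send_next_nolock = completed_send_next_nolock_32,
};

static const struct fake_ce_ops ops_64 = {
    .completed_send_next_nolock = completed_send_next_nolock_64,
};

/* The exported wrapper just forwards to whichever ops the pipe was given. */
static int completed_send_next_nolock(struct fake_pipe *pipe, void **ctx)
{
    return pipe->ops->completed_send_next_nolock(pipe, ctx);
}

int main(void)
{
    struct fake_pipe p32 = { .ops = &ops_32 };
    struct fake_pipe p64 = { .ops = &ops_64 };

    completed_send_next_nolock(&p32, NULL);
    completed_send_next_nolock(&p64, NULL);
    return 0;
}
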
@@ -1346,7 +1440,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
ath10k_ce_src_ring_write_index_get(ar, ctrl_addr);
src_ring->write_index &= src_ring->nentries_mask;
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr,
+ ath10k_ce_src_ring_base_addr_set(ar, ce_id,
src_ring->base_addr_ce_space);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, attr->src_sz_max);
@@ -1385,7 +1479,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
ath10k_ce_dest_ring_write_index_get(ar, ctrl_addr);
dest_ring->write_index &= dest_ring->nentries_mask;
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr,
+ ath10k_ce_dest_ring_base_addr_set(ar, ce_id,
dest_ring->base_addr_ce_space);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, nentries);
ath10k_ce_dest_ring_byte_swap_set(ar, ctrl_addr, 0);
@@ -1404,12 +1498,12 @@ static int ath10k_ce_alloc_shadow_base(struct ath10k *ar,
u32 nentries)
{
src_ring->shadow_base_unaligned = kcalloc(nentries,
- sizeof(struct ce_desc),
+ sizeof(struct ce_desc_64),
GFP_KERNEL);
if (!src_ring->shadow_base_unaligned)
return -ENOMEM;
- src_ring->shadow_base = (struct ce_desc *)
+ src_ring->shadow_base = (struct ce_desc_64 *)
PTR_ALIGN(src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
return 0;
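
The shadow descriptor array is now sized for the 64-bit descriptor layout and, as before, the working pointer is rounded up to the descriptor-ring alignment with PTR_ALIGN(). Below is a standalone sketch of that align-up arithmetic only; the descriptor size and alignment are illustrative, and here the buffer is over-allocated by the alignment so the rounded pointer is guaranteed to stay inside it.

/* Standalone sketch of PTR_ALIGN-style pointer rounding.  The descriptor
 * size and alignment are illustrative; the buffer is over-allocated by
 * the alignment so the rounded pointer stays inside it.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define DESC_RING_ALIGN 8u
#define DESC64_SIZE     16u   /* stand-in for sizeof(struct ce_desc_64) */

static void *ptr_align(void *p, uintptr_t align)
{
    uintptr_t v = (uintptr_t)p;

    return (void *)((v + align - 1) & ~(align - 1));
}

int main(void)
{
    unsigned int nentries = 512;
    void *unaligned, *shadow_base;

    unaligned = calloc(1, (size_t)nentries * DESC64_SIZE + DESC_RING_ALIGN);
    if (!unaligned)
        return 1;

    shadow_base = ptr_align(unaligned, DESC_RING_ALIGN);
    printf("unaligned=%p aligned=%p\n", unaligned, shadow_base);

    free(unaligned);   /* always free the original, unaligned pointer */
    return 0;
}
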
@@ -1461,7 +1555,7 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
ret = ath10k_ce_alloc_shadow_base(ar, src_ring, nentries);
if (ret) {
dma_free_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) +
+ (nentries * sizeof(struct ce_desc_64) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space_unaligned,
base_addr);
@@ -1554,7 +1648,8 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
*/
dest_ring->base_addr_owner_space_unaligned =
dma_alloc_coherent(ar->dev,
- (nentries * sizeof(struct ce_desc) + CE_DESC_RING_ALIGN),
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
&base_addr, GFP_KERNEL);
if (!dest_ring->base_addr_owner_space_unaligned) {
kfree(dest_ring);
@@ -1660,7 +1755,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{
u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_src_ring_base_addr_set(ar, ce_id, 0);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_dmax_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_highmark_set(ar, ctrl_addr, 0);
@@ -1670,7 +1765,7 @@ static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{
u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
- ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
+ ath10k_ce_dest_ring_base_addr_set(ar, ce_id, 0);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_highmark_set(ar, ctrl_addr, 0);
}
@@ -1802,6 +1897,9 @@ static const struct ath10k_ce_ops ce_ops = {
.ce_extract_desc_data = ath10k_ce_extract_desc_data,
.ce_free_pipe = _ath10k_ce_free_pipe,
.ce_send_nolock = _ath10k_ce_send_nolock,
+ .ce_set_src_ring_base_addr_hi = NULL,
+ .ce_set_dest_ring_base_addr_hi = NULL,
+ .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock,
};
static const struct ath10k_ce_ops ce_64_ops = {
@@ -1814,6 +1912,9 @@ static const struct ath10k_ce_ops ce_64_ops = {
.ce_extract_desc_data = ath10k_ce_extract_desc_data_64,
.ce_free_pipe = _ath10k_ce_free_pipe_64,
.ce_send_nolock = _ath10k_ce_send_nolock_64,
+ .ce_set_src_ring_base_addr_hi = ath10k_ce_set_src_ring_base_addr_hi,
+ .ce_set_dest_ring_base_addr_hi = ath10k_ce_set_dest_ring_base_addr_hi,
+ .ce_completed_send_next_nolock = _ath10k_ce_completed_send_next_nolock_64,
};
static void ath10k_ce_set_ops(struct ath10k *ar,
@@ -1909,7 +2010,7 @@ void ath10k_ce_alloc_rri(struct ath10k *ar)
lower_32_bits(ce->paddr_rri));
ath10k_ce_write32(ar, ar->hw_ce_regs->ce_rri_high,
(upper_32_bits(ce->paddr_rri) &
- CE_DESC_FLAGS_GET_MASK));
+ CE_DESC_ADDR_HI_MASK));
for (i = 0; i < CE_COUNT; i++) {
ctrl1_regs = ar->hw_ce_regs->ctrl1_regs->addr;