Diffstat (limited to 'drivers/net/ipa/gsi.c')
-rw-r--r--  drivers/net/ipa/gsi.c  464
1 file changed, 239 insertions(+), 225 deletions(-)
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index a2fcdb1abdb9..bea2da1c4c51 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -56,9 +56,9 @@
* element can also contain an immediate command, requesting the IPA perform
* actions other than data transfer.
*
- * Each TRE refers to a block of data--also located DRAM. After writing one
- * or more TREs to a channel, the writer (either the IPA or an EE) writes a
- * doorbell register to inform the receiving side how many elements have
+ * Each TRE refers to a block of data--also located in DRAM. After writing
+ * one or more TREs to a channel, the writer (either the IPA or an EE) writes
+ * a doorbell register to inform the receiving side how many elements have
* been written.
*
* Each channel has a GSI "event ring" associated with it. An event ring
@@ -93,6 +93,7 @@
#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
+#define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
@@ -339,10 +340,10 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
* completion to be signaled. Returns true if the command completes
* or false if it times out.
*/
-static bool
-gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
+static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+ struct completion *completion = &gsi->completion;
reinit_completion(completion);
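
The remainder of gsi_command() is not shown in this hunk; below is a minimal sketch of the whole write-then-wait pattern it implements, under the assumption that the rest of the body is the usual iowrite32() of the command value followed by a timed wait on gsi->completion (which the control interrupt handlers below signal):

static bool gsi_command_sketch(struct gsi *gsi, u32 reg, u32 val)
{
        unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
        struct completion *completion = &gsi->completion;

        reinit_completion(completion);

        /* Post the command, then wait for the IRQ handler to signal it */
        iowrite32(val, gsi->virt + reg);

        return !!wait_for_completion_timeout(completion, timeout);
}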
@@ -366,8 +367,6 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
enum gsi_evt_cmd_opcode opcode)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
bool timeout;
u32 val;
@@ -378,7 +377,7 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
gsi_irq_ev_ctrl_disable(gsi);
@@ -478,7 +477,6 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
- struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
@@ -490,7 +488,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
gsi_irq_ch_ctrl_disable(gsi);
@@ -667,7 +665,8 @@ static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
+ struct gsi_ring *ring = &evt_ring->ring;
+ size_t size;
u32 val;
/* We program all event rings as GPI type/protocol */
@@ -676,6 +675,7 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+ size = ring->count * GSI_RING_ELEMENT_SIZE;
val = ev_r_length_encoded(gsi->version, size);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
@@ -683,9 +683,9 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
* high-order 32 bits of the address of the event ring,
* respectively.
*/
- val = lower_32_bits(evt_ring->ring.addr);
+ val = lower_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
- val = upper_32_bits(evt_ring->ring.addr);
+ val = upper_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
@@ -702,48 +702,40 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
- /* Finally, tell the hardware we've completed event 0 (arbitrary) */
- gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
+ /* Finally, tell the hardware our "last processed" event (arbitrary) */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
}
/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
- const struct list_head *list;
+ u32 pending_id = trans_info->pending_id;
struct gsi_trans *trans;
-
- spin_lock_bh(&trans_info->spinlock);
-
- /* There is a small chance a TX transaction got allocated just
- * before we disabled transmits, so check for that.
- */
- if (channel->toward_ipa) {
- list = &trans_info->alloc;
- if (!list_empty(list))
- goto done;
- list = &trans_info->pending;
- if (!list_empty(list))
- goto done;
+ u16 trans_id;
+
+ if (channel->toward_ipa && pending_id != trans_info->free_id) {
+ /* There is a small chance a TX transaction got allocated
+ * just before we disabled transmits, so check for that.
+ * The last allocated, committed, or pending transaction
+ * precedes the first free transaction.
+ */
+ trans_id = trans_info->free_id - 1;
+ } else if (trans_info->polled_id != pending_id) {
+ /* Otherwise (TX or RX) we want to wait for anything that
+ * has completed, or has been polled but not released yet.
+ *
+ * The last completed or polled transaction precedes the
+ * first pending transaction.
+ */
+ trans_id = pending_id - 1;
+ } else {
+ return NULL;
}
- /* Otherwise (TX or RX) we want to wait for anything that
- * has completed, or has been polled but not released yet.
- */
- list = &trans_info->complete;
- if (!list_empty(list))
- goto done;
- list = &trans_info->polled;
- if (list_empty(list))
- list = NULL;
-done:
- trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
-
/* Caller will wait for this, so take a reference */
- if (trans)
- refcount_inc(&trans->refcount);
-
- spin_unlock_bh(&trans_info->spinlock);
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ refcount_inc(&trans->refcount);
return trans;
}
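
This relies on the ID-based transaction bookkeeping introduced elsewhere in this series: polled_id, pending_id, and free_id are free-running 16-bit counters, and a transaction with a given ID occupies a fixed slot in the per-channel trans[] array. A sketch of the ID-to-slot mapping the code above uses (the helper name is illustrative only):

/* Map a free-running 16-bit transaction ID to its array slot.  Unsigned
 * arithmetic keeps "free_id - 1" and "pending_id - 1" valid even after
 * the 16-bit counters wrap.
 */
static struct gsi_trans *
gsi_channel_trans_by_id(struct gsi_channel *channel, u16 trans_id)
{
        return &channel->trans_info.trans[trans_id % channel->tre_count];
}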
@@ -772,9 +764,6 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
u32 wrr_weight = 0;
u32 val;
- /* Arbitrarily pick TRE 0 as the first channel element to use */
- channel->tre_ring.index = 0;
-
/* We program all channels as GPI type/protocol */
val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
@@ -825,7 +814,7 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Now update the scratch registers for GPI protocol */
gpi = &scr.gpi;
- gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
+ gpi->max_outstanding_tre = channel->trans_tre_max *
GSI_RING_ELEMENT_SIZE;
gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
@@ -951,6 +940,8 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
+ /* Hardware assumes this is 0 following reset */
+ channel->tre_ring.index = 0;
gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
@@ -993,75 +984,66 @@ void gsi_resume(struct gsi *gsi)
enable_irq(gsi->irq);
}
-/**
- * gsi_channel_tx_queued() - Report queued TX transfers for a channel
- * @channel: Channel for which to report
- *
- * Report to the network stack the number of bytes and transactions that
- * have been queued to hardware since last call. This and the next function
- * supply information used by the network stack for throttling.
- *
- * For each channel we track the number of transactions used and bytes of
- * data those transactions represent. We also track what those values are
- * each time this function is called. Subtracting the two tells us
- * the number of bytes and transactions that have been added between
- * successive calls.
- *
- * Calling this each time we ring the channel doorbell allows us to
- * provide accurate information to the network stack about how much
- * work we've given the hardware at any point in time.
- */
-void gsi_channel_tx_queued(struct gsi_channel *channel)
+void gsi_trans_tx_committed(struct gsi_trans *trans)
{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+
+ channel->trans_count++;
+ channel->byte_count += trans->len;
+
+ trans->trans_count = channel->trans_count;
+ trans->byte_count = channel->byte_count;
+}
+
+void gsi_trans_tx_queued(struct gsi_trans *trans)
+{
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
u32 trans_count;
u32 byte_count;
+ channel = &gsi->channel[channel_id];
+
byte_count = channel->byte_count - channel->queued_byte_count;
trans_count = channel->trans_count - channel->queued_trans_count;
channel->queued_byte_count = channel->byte_count;
channel->queued_trans_count = channel->trans_count;
- ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}
/**
- * gsi_channel_tx_update() - Report completed TX transfers
- * @channel: Channel that has completed transmitting packets
- * @trans: Last transation known to be complete
- *
- * Compute the number of transactions and bytes that have been transferred
- * over a TX channel since the given transaction was committed. Report this
- * information to the network stack.
+ * gsi_trans_tx_completed() - Report completed TX transactions
+ * @trans: TX channel transaction that has completed
*
- * At the time a transaction is committed, we record its channel's
- * committed transaction and byte counts *in the transaction*.
- * Completions are signaled by the hardware with an interrupt, and
- * we can determine the latest completed transaction at that time.
+ * Report that a transaction on a TX channel has completed. At the time a
+ * transaction is committed, we record *in the transaction* its channel's
+ * committed transaction and byte counts. Transactions are completed in
+ * order, and the difference between the channel's byte/transaction count
+ * when the transaction was committed and when it completes tells us
+ * exactly how much data has been transferred while the transaction was
+ * pending.
*
- * The difference between the byte/transaction count recorded in
- * the transaction and the count last time we recorded a completion
- * tells us exactly how much data has been transferred between
- * completions.
- *
- * Calling this each time we learn of a newly-completed transaction
- * allows us to provide accurate information to the network stack
- * about how much work has been completed by the hardware at a given
- * point in time.
+ * We report this information to the network stack, which uses it to manage
+ * the rate at which data is sent to hardware.
*/
-static void
-gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
+static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
- u64 byte_count = trans->byte_count + trans->len;
- u64 trans_count = trans->trans_count + 1;
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
+ u32 trans_count;
+ u32 byte_count;
+
+ channel = &gsi->channel[channel_id];
+ trans_count = trans->trans_count - channel->compl_trans_count;
+ byte_count = trans->byte_count - channel->compl_byte_count;
- byte_count -= channel->compl_byte_count;
- channel->compl_byte_count += byte_count;
- trans_count -= channel->compl_trans_count;
channel->compl_trans_count += trans_count;
+ channel->compl_byte_count += byte_count;
- ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}
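
The accounting above works with free-running u32 counters: gsi_trans_tx_committed() snapshots the channel's committed byte/transaction totals into each transaction, and gsi_trans_tx_completed() reports the difference against the channel's "completed" totals. A self-contained model of that delta arithmetic (names are illustrative; unsigned subtraction keeps the deltas correct across 32-bit wraparound):

/* trans_byte_count/trans_trans_count are the channel totals recorded in
 * the transaction at commit time; compl_* are the channel's running
 * "reported completed" totals.
 */
static void tx_completed_delta(u32 trans_byte_count, u32 trans_trans_count,
                               u32 *compl_byte_count, u32 *compl_trans_count,
                               u32 *byte_delta, u32 *trans_delta)
{
        *byte_delta = trans_byte_count - *compl_byte_count;
        *trans_delta = trans_trans_count - *compl_trans_count;

        *compl_byte_count += *byte_delta;
        *compl_trans_count += *trans_delta;
}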
/* Channel control interrupt handler */
@@ -1074,13 +1056,10 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
while (channel_mask) {
u32 channel_id = __ffs(channel_mask);
- struct gsi_channel *channel;
channel_mask ^= BIT(channel_id);
- channel = &gsi->channel[channel_id];
-
- complete(&channel->completion);
+ complete(&gsi->completion);
}
}
@@ -1094,13 +1073,10 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
- struct gsi_evt_ring *evt_ring;
event_mask ^= BIT(evt_ring_id);
- evt_ring = &gsi->evt_ring[evt_ring_id];
-
- complete(&evt_ring->completion);
+ complete(&gsi->completion);
}
}
@@ -1110,7 +1086,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
if (code == GSI_OUT_OF_RESOURCES) {
dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
- complete(&gsi->channel[channel_id].completion);
+ complete(&gsi->completion);
return;
}
@@ -1127,7 +1103,7 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
u32 channel_id = gsi_channel_id(evt_ring->channel);
- complete(&evt_ring->completion);
+ complete(&gsi->completion);
dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
channel_id);
return;
@@ -1171,26 +1147,31 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
u32 result;
u32 val;
- /* This interrupt is used to handle completions of the two GENERIC
- * GSI commands. We use these to allocate and halt channels on
- * the modem's behalf due to a hardware quirk on IPA v4.2. Once
- * allocated, the modem "owns" these channels, and as a result we
- * have no way of knowing the channel's state at any given time.
+ /* This interrupt is used to handle completions of GENERIC GSI
+ * commands. We use these to allocate and halt channels on the
+ * modem's behalf due to a hardware quirk on IPA v4.2. The modem
+ * "owns" channels even when the AP allocates them, and have no
+ * way of knowing whether a modem channel's state has been changed.
+ *
+ * We also use GENERIC commands to enable/disable channel flow
+ * control for IPA v4.2+.
*
* It is recommended that we halt the modem channels we allocated
* when shutting down, but it's possible the channel isn't running
* at the time we issue the HALT command. We'll get an error in
* that case, but it's harmless (the channel is already halted).
+ * Similarly, we could get an error back when updating flow control
+ * on a channel because it's not in the proper state.
*
- * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
- * if we receive it.
+ * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
+ * error if we receive it.
*/
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
switch (result) {
case GENERIC_EE_SUCCESS:
- case GENERIC_EE_CHANNEL_NOT_RUNNING:
+ case GENERIC_EE_INCORRECT_CHANNEL_STATE:
gsi->result = 0;
break;
@@ -1330,60 +1311,73 @@ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
}
/* Return the transaction associated with a transfer completion event */
-static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
- struct gsi_event *event)
+static struct gsi_trans *
+gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
{
+ u32 channel_id = event->chid;
+ struct gsi_channel *channel;
+ struct gsi_trans *trans;
u32 tre_offset;
u32 tre_index;
+ channel = &gsi->channel[channel_id];
+ if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
+ return NULL;
+
/* Event xfer_ptr records the TRE it's associated with */
tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
- return gsi_channel_trans_mapped(channel, tre_index);
+ trans = gsi_channel_trans_mapped(channel, tre_index);
+
+ if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
+ return NULL;
+
+ return trans;
}
/**
- * gsi_evt_ring_rx_update() - Record lengths of received data
- * @evt_ring: Event ring associated with channel that received packets
- * @index: Event index in ring reported by hardware
+ * gsi_evt_ring_update() - Update transaction state from hardware
+ * @gsi: GSI pointer
+ * @evt_ring_id: Event ring ID
+ * @index: Event index in ring reported by hardware
*
* Events for RX channels contain the actual number of bytes received into
* the buffer. Every event has a transaction associated with it, and here
* we update transactions to record their actual received lengths.
*
+ * When an event for a TX channel arrives we use information in the
+ * transaction to report the number of requests and bytes that have
+ * been transferred.
+ *
* This function is called whenever we learn that the GSI hardware has filled
* new events since the last time we checked. The ring's index field tells
* the first entry in need of processing. The index provided is the
* first *unfilled* event in the ring (following the last filled one).
*
* Events are sequential within the event ring, and transactions are
- * sequential within the transaction pool.
+ * sequential within the transaction array.
*
* Note that @index always refers to an element *within* the event ring.
*/
-static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
- struct gsi_channel *channel = evt_ring->channel;
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
- struct gsi_trans_info *trans_info;
struct gsi_event *event_done;
struct gsi_event *event;
- struct gsi_trans *trans;
- u32 byte_count = 0;
- u32 old_index;
u32 event_avail;
+ u32 old_index;
- trans_info = &channel->trans_info;
-
- /* We'll start with the oldest un-processed event. RX channels
- * replenish receive buffers in single-TRE transactions, so we
- * can just map that event to its transaction. Transactions
- * associated with completion events are consecutive.
+ /* Starting with the oldest un-processed event, determine which
+ * transaction (and which channel) is associated with the event.
+ * For RX channels, update each completed transaction with the
+ * number of bytes that were actually received. For TX channels
+ * associated with a network device, report to the network stack
+ * the number of transfers and bytes this completion represents.
*/
old_index = ring->index;
event = gsi_ring_virt(ring, old_index);
- trans = gsi_event_trans(channel, event);
/* Compute the number of events to process before we wrap,
* and determine when we'll be done processing events.
@@ -1391,20 +1385,28 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
event_avail = ring->count - old_index % ring->count;
event_done = gsi_ring_virt(ring, index);
do {
- trans->len = __le16_to_cpu(event->len);
- byte_count += trans->len;
+ struct gsi_trans *trans;
+
+ trans = gsi_event_trans(gsi, event);
+ if (!trans)
+ return;
+
+ if (trans->direction == DMA_FROM_DEVICE)
+ trans->len = __le16_to_cpu(event->len);
+ else
+ gsi_trans_tx_completed(trans);
+
+ gsi_trans_move_complete(trans);
/* Move on to the next event and transaction */
if (--event_avail)
event++;
else
event = gsi_ring_virt(ring, 0);
- trans = gsi_trans_pool_next(&trans_info->pool, trans);
} while (event != event_done);
- /* We record RX bytes when they are received */
- channel->byte_count += byte_count;
- channel->trans_count++;
+ /* Tell the hardware we've handled these events */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}
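
The do/while above visits every filled event from the oldest unprocessed slot up to (but not including) the index reported by hardware, wrapping once at the end of the ring. The same traversal, isolated as a sketch (assumes gsi_ring_virt() and the gsi_ring fields used above):

static void evt_ring_walk(struct gsi_ring *ring, u32 old_index, u32 index,
                          void (*visit)(struct gsi_event *event))
{
        u32 event_avail = ring->count - old_index % ring->count;
        struct gsi_event *event_done = gsi_ring_virt(ring, index);
        struct gsi_event *event = gsi_ring_virt(ring, old_index);

        do {
                visit(event);
                /* Step to the next slot, wrapping to slot 0 at the end */
                if (--event_avail)
                        event++;
                else
                        event = gsi_ring_virt(ring, 0);
        } while (event != event_done);
}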
/* Initialize a ring, including allocating DMA memory for its entries */
@@ -1424,6 +1426,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
ring->addr = addr;
ring->count = count;
+ ring->index = 0;
return 0;
}
@@ -1471,8 +1474,8 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}
-/* Consult hardware, move any newly completed transactions to completed list */
-static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
+/* Consult hardware, move newly completed transactions to completed state */
+void gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
@@ -1491,33 +1494,19 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
- return NULL;
+ return;
- /* Get the transaction for the latest completed event. Take a
- * reference to keep it from completing before we give the events
- * for this and previous transactions back to the hardware.
- */
- trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
- refcount_inc(&trans->refcount);
+ /* Get the transaction for the latest completed event. */
+ trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
+ if (!trans)
+ return;
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
* the number of transactions and bytes this completion represents
* up the network stack.
*/
- if (channel->toward_ipa)
- gsi_channel_tx_update(channel, trans);
- else
- gsi_evt_ring_rx_update(evt_ring, index);
-
- gsi_trans_move_complete(trans);
-
- /* Tell the hardware we've handled these events */
- gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
-
- gsi_trans_free(trans);
-
- return gsi_channel_trans_complete(channel);
+ gsi_evt_ring_update(gsi, evt_ring_id, index);
}
/**
@@ -1526,21 +1515,18 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
*
* Return: Transaction pointer, or null if none are available
*
- * This function returns the first entry on a channel's completed transaction
- * list. If that list is empty, the hardware is consulted to determine
- * whether any new transactions have completed. If so, they're moved to the
- * completed list and the new first entry is returned. If there are no more
- * completed transactions, a null pointer is returned.
+ * This function returns the first of a channel's completed transactions.
+ * If no transactions are in completed state, the hardware is consulted to
+ * determine whether any new transactions have completed. If so, they're
+ * moved to completed state and the first such transaction is returned.
+ * If there are no more completed transactions, a null pointer is returned.
*/
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
struct gsi_trans *trans;
- /* Get the first transaction from the completed list */
+ /* Get the first completed transaction */
trans = gsi_channel_trans_complete(channel);
- if (!trans) /* List is empty; see if there's more to do */
- trans = gsi_channel_update(channel);
-
if (trans)
gsi_trans_move_polled(trans);
@@ -1617,11 +1603,11 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
gsi_channel_program(channel, true);
if (channel->toward_ipa)
- netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
- gsi_channel_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll);
else
netif_napi_add(&gsi->dummy_dev, &channel->napi,
- gsi_channel_poll, NAPI_POLL_WEIGHT);
+ gsi_channel_poll);
return 0;
@@ -1648,19 +1634,25 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}
+/* We use generic commands only to operate on modem channels. We don't have
+ * the ability to determine channel state for a modem channel, so we simply
+ * issue the command and wait for it to complete.
+ */
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
- enum gsi_generic_cmd_opcode opcode)
+ enum gsi_generic_cmd_opcode opcode,
+ u8 params)
{
- struct completion *completion = &gsi->completion;
bool timeout;
u32 val;
- /* The error global interrupt type is always enabled (until we
- * teardown), so we won't change that. A generic EE command
- * completes with a GSI global interrupt of type GP_INT1. We
- * only perform one generic command at a time (to allocate or
- * halt a modem channel) and only from this function. So we
- * enable the GP_INT1 IRQ type here while we're expecting it.
+ /* The error global interrupt type is always enabled (until we tear
+ * down), so we will keep it enabled.
+ *
+ * A generic EE command completes with a GSI global interrupt of
+ * type GP_INT1. We only perform one generic command at a time
+ * (to allocate, halt, or enable/disable flow control on a modem
+ * channel), and only from this function. So we enable the GP_INT1
+ * IRQ type here, and disable it again after the command completes.
*/
val = BIT(ERROR_INT) | BIT(GP_INT1);
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@@ -1674,8 +1666,9 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
+ val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
- timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@@ -1692,7 +1685,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
return gsi_generic_command(gsi, channel_id,
- GSI_GENERIC_ALLOCATE_CHANNEL);
+ GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}
static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
@@ -1702,7 +1695,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
do
ret = gsi_generic_command(gsi, channel_id,
- GSI_GENERIC_HALT_CHANNEL);
+ GSI_GENERIC_HALT_CHANNEL, 0);
while (ret == -EAGAIN && retries--);
if (ret)
@@ -1710,6 +1703,32 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
ret, channel_id);
}
+/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
+void
+gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
+{
+ u32 retries = 0;
+ u32 command;
+ int ret;
+
+ command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
+ : GSI_GENERIC_DISABLE_FLOW_CONTROL;
+ /* Disabling flow control on IPA v4.11+ can return -EAGAIN if enable
+ * is underway. In this case we need to retry the command.
+ */
+ if (!enable && gsi->version >= IPA_VERSION_4_11)
+ retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
+
+ do
+ ret = gsi_generic_command(gsi, channel_id, command, 0);
+ while (ret == -EAGAIN && retries--);
+
+ if (ret)
+ dev_err(gsi->dev,
+ "error %d %sabling mode channel %u flow control\n",
+ ret, enable ? "en" : "dis", channel_id);
+}
+
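
The call site for this new function lives outside gsi.c; purely as an assumption about how the endpoint code uses it, a modem TX endpoint on IPA v4.2+ would be throttled with something like the following rather than endpoint DELAY mode:

/* Hypothetical caller (the real one is in the IPA endpoint code) */
static void modem_tx_throttle(struct gsi *gsi, u32 channel_id, bool enable)
{
        if (gsi->version >= IPA_VERSION_4_2)
                gsi_modem_channel_flow_control(gsi, channel_id, enable);
}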
/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
@@ -1975,21 +1994,10 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
-/* Init function for event rings; there is no gsi_evt_ring_exit() */
-static void gsi_evt_ring_init(struct gsi *gsi)
-{
- u32 evt_ring_id = 0;
-
- gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
- gsi->ieob_enabled_bitmap = 0;
- do
- init_completion(&gsi->evt_ring[evt_ring_id].completion);
- while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
-}
-
-static bool gsi_channel_data_valid(struct gsi *gsi,
+static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
const struct ipa_gsi_endpoint_data *data)
{
+ const struct gsi_channel_data *channel_data;
u32 channel_id = data->channel_id;
struct device *dev = gsi->dev;
@@ -2005,10 +2013,24 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
return false;
}
- if (!data->channel.tlv_count ||
- data->channel.tlv_count > GSI_TLV_MAX) {
+ if (command && !data->toward_ipa) {
+ dev_err(dev, "command channel %u is not TX\n", channel_id);
+ return false;
+ }
+
+ channel_data = &data->channel;
+
+ if (!channel_data->tlv_count ||
+ channel_data->tlv_count > GSI_TLV_MAX) {
dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
- channel_id, data->channel.tlv_count, GSI_TLV_MAX);
+ channel_id, channel_data->tlv_count, GSI_TLV_MAX);
+ return false;
+ }
+
+ if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
+ dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
+ channel_id, IPA_COMMAND_TRANS_TRE_MAX,
+ channel_data->tlv_count);
return false;
}
@@ -2017,22 +2039,22 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
* gsi_channel_tre_max() is computed, tre_count has to be almost
* twice the TLV FIFO size to satisfy this requirement.
*/
- if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
+ if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
- channel_id, data->channel.tlv_count,
- data->channel.tre_count);
+ channel_id, channel_data->tlv_count,
+ channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.tre_count)) {
+ if (!is_power_of_2(channel_data->tre_count)) {
dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
- channel_id, data->channel.tre_count);
+ channel_id, channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.event_count)) {
+ if (!is_power_of_2(channel_data->event_count)) {
dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
- channel_id, data->channel.event_count);
+ channel_id, channel_data->event_count);
return false;
}
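
Taken together, the checks above require tre_count to be a power of 2 no smaller than 2 * tlv_count - 1. For example, tlv_count = 16 requires tre_count >= 31, so the smallest valid ring is 32 TREs. A compact restatement (illustrative helper only):

static bool tre_counts_valid(u32 tre_count, u32 tlv_count)
{
        /* e.g. tlv_count = 16 => tre_count >= 31, so 32 at minimum */
        return is_power_of_2(tre_count) && tre_count >= 2 * tlv_count - 1;
}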
@@ -2048,7 +2070,7 @@ static int gsi_channel_init_one(struct gsi *gsi,
u32 tre_count;
int ret;
- if (!gsi_channel_data_valid(gsi, data))
+ if (!gsi_channel_data_valid(gsi, command, data))
return -EINVAL;
/* Worst case we need an event for every outstanding TRE */
@@ -2066,10 +2088,9 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
- channel->tlv_count = data->channel.tlv_count;
+ channel->trans_tre_max = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
- init_completion(&channel->completion);
ret = gsi_channel_evt_ring_init(channel);
if (ret)
@@ -2129,7 +2150,8 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
/* IPA v4.2 requires the AP to allocate channels for the modem */
modem_alloc = gsi->version == IPA_VERSION_4_2;
- gsi_evt_ring_init(gsi); /* No matching exit required */
+ gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+ gsi->ieob_enabled_bitmap = 0;
/* The endpoint data array is indexed by endpoint name */
for (i = 0; i < count; i++) {
@@ -2281,13 +2303,5 @@ u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
/* Hardware limit is channel->tre_count - 1 */
- return channel->tre_count - (channel->tlv_count - 1);
-}
-
-/* Returns the maximum number of TREs in a single transaction for a channel */
-u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
-{
- struct gsi_channel *channel = &gsi->channel[channel_id];
-
- return channel->tlv_count;
+ return channel->tre_count - (channel->trans_tre_max - 1);
}
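
As a worked example with assumed values: a channel with tre_count = 256 and trans_tre_max = 16 gets 256 - (16 - 1) = 241 TREs available for transactions, comfortably under the hardware limit of tre_count - 1 = 255 while leaving room for one maximally-sized transaction.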