Diffstat (limited to 'drivers/net/ipa')
-rw-r--r--  drivers/net/ipa/Kconfig  2
-rw-r--r--  drivers/net/ipa/Makefile  12
-rw-r--r--  drivers/net/ipa/data/ipa_data-v3.1.c (renamed from drivers/net/ipa/ipa_data-v3.1.c)  14
-rw-r--r--  drivers/net/ipa/data/ipa_data-v3.5.1.c (renamed from drivers/net/ipa/ipa_data-v3.5.1.c)  26
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.11.c (renamed from drivers/net/ipa/ipa_data-v4.11.c)  12
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.2.c (renamed from drivers/net/ipa/ipa_data-v4.2.c)  12
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.5.c (renamed from drivers/net/ipa/ipa_data-v4.5.c)  19
-rw-r--r--  drivers/net/ipa/data/ipa_data-v4.9.c (renamed from drivers/net/ipa/ipa_data-v4.9.c)  12
-rw-r--r--  drivers/net/ipa/gsi.c  464
-rw-r--r--  drivers/net/ipa/gsi.h  70
-rw-r--r--  drivers/net/ipa/gsi_private.h  38
-rw-r--r--  drivers/net/ipa/gsi_reg.h  216
-rw-r--r--  drivers/net/ipa/gsi_trans.c  433
-rw-r--r--  drivers/net/ipa/gsi_trans.h  56
-rw-r--r--  drivers/net/ipa/ipa.h  6
-rw-r--r--  drivers/net/ipa/ipa_cmd.c  93
-rw-r--r--  drivers/net/ipa/ipa_cmd.h  13
-rw-r--r--  drivers/net/ipa/ipa_data.h  72
-rw-r--r--  drivers/net/ipa/ipa_endpoint.c  923
-rw-r--r--  drivers/net/ipa/ipa_endpoint.h  116
-rw-r--r--  drivers/net/ipa/ipa_interrupt.c  53
-rw-r--r--  drivers/net/ipa/ipa_interrupt.h  2
-rw-r--r--  drivers/net/ipa/ipa_main.c  296
-rw-r--r--  drivers/net/ipa/ipa_mem.c  24
-rw-r--r--  drivers/net/ipa/ipa_modem.c  25
-rw-r--r--  drivers/net/ipa/ipa_modem.h  5
-rw-r--r--  drivers/net/ipa/ipa_power.c  232
-rw-r--r--  drivers/net/ipa/ipa_power.h  9
-rw-r--r--  drivers/net/ipa/ipa_qmi.c  12
-rw-r--r--  drivers/net/ipa/ipa_qmi.h  2
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.c  10
-rw-r--r--  drivers/net/ipa/ipa_qmi_msg.h  41
-rw-r--r--  drivers/net/ipa/ipa_reg.c  97
-rw-r--r--  drivers/net/ipa/ipa_reg.h  1121
-rw-r--r--  drivers/net/ipa/ipa_resource.c  65
-rw-r--r--  drivers/net/ipa/ipa_smp2p.c  2
-rw-r--r--  drivers/net/ipa/ipa_smp2p.h  2
-rw-r--r--  drivers/net/ipa/ipa_sysfs.c  71
-rw-r--r--  drivers/net/ipa/ipa_sysfs.h  3
-rw-r--r--  drivers/net/ipa/ipa_table.c  79
-rw-r--r--  drivers/net/ipa/ipa_table.h  5
-rw-r--r--  drivers/net/ipa/ipa_uc.c  16
-rw-r--r--  drivers/net/ipa/ipa_uc.h  2
-rw-r--r--  drivers/net/ipa/ipa_version.h  30
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v3.1.c  446
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v3.5.1.c  456
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.11.c  512
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.2.c  456
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.5.c  533
-rw-r--r--  drivers/net/ipa/reg/ipa_reg-v4.9.c  509
50 files changed, 5407 insertions(+), 2318 deletions(-)
diff --git a/drivers/net/ipa/Kconfig b/drivers/net/ipa/Kconfig
index d037682fb7ad..6782c2cbf542 100644
--- a/drivers/net/ipa/Kconfig
+++ b/drivers/net/ipa/Kconfig
@@ -2,7 +2,9 @@ config QCOM_IPA
tristate "Qualcomm IPA support"
depends on NET && QCOM_SMEM
depends on ARCH_QCOM || COMPILE_TEST
+ depends on INTERCONNECT
depends on QCOM_RPROC_COMMON || (QCOM_RPROC_COMMON=n && COMPILE_TEST)
+ depends on QCOM_AOSS_QMP || QCOM_AOSS_QMP=n
select QCOM_MDT_LOADER if ARCH_QCOM
select QCOM_SCM
select QCOM_QMI_HELPERS
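(The "QCOM_AOSS_QMP || QCOM_AOSS_QMP=n" idiom makes the dependency optional but consistent: QCOM_IPA may be enabled whether or not the AOSS QMP driver is configured, but if QCOM_AOSS_QMP=m then QCOM_IPA cannot be built-in, since built-in code cannot call into a module.)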
diff --git a/drivers/net/ipa/Makefile b/drivers/net/ipa/Makefile
index bdfb2430ab2c..48255fc4b25c 100644
--- a/drivers/net/ipa/Makefile
+++ b/drivers/net/ipa/Makefile
@@ -1,3 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Qualcomm IPA driver.
+
+IPA_VERSIONS := 3.1 3.5.1 4.2 4.5 4.9 4.11
+
obj-$(CONFIG_QCOM_IPA) += ipa.o
ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
@@ -7,6 +13,6 @@ ipa-y := ipa_main.o ipa_power.o ipa_reg.o ipa_mem.o \
ipa_resource.o ipa_qmi.o ipa_qmi_msg.o \
ipa_sysfs.o
-ipa-y += ipa_data-v3.1.o ipa_data-v3.5.1.o \
- ipa_data-v4.2.o ipa_data-v4.5.o \
- ipa_data-v4.9.o ipa_data-v4.11.o
+ipa-y += $(IPA_VERSIONS:%=reg/ipa_reg-v%.o)
+
+ipa-y += $(IPA_VERSIONS:%=data/ipa_data-v%.o)
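For reference, each substitution reference above expands the version list into a per-directory object list; $(IPA_VERSIONS:%=data/ipa_data-v%.o) yields

	data/ipa_data-v3.1.o data/ipa_data-v3.5.1.o data/ipa_data-v4.2.o
	data/ipa_data-v4.5.o data/ipa_data-v4.9.o data/ipa_data-v4.11.o

which is the same set the old explicit ipa-y list spelled out, now relocated under data/ (and likewise reg/ipa_reg-v%.o for the new register definition files).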
diff --git a/drivers/net/ipa/ipa_data-v3.1.c b/drivers/net/ipa/data/ipa_data-v3.1.c
index 06ddb85f39b2..e0d71f609272 100644
--- a/drivers/net/ipa/ipa_data-v3.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.1.c
@@ -6,10 +6,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.1 */
enum ipa_resource_type {
@@ -101,7 +101,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -148,6 +150,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
@@ -522,7 +526,7 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.1 */
const struct ipa_data ipa_data_v3_1 = {
.version = IPA_VERSION_3_1,
- .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
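The change from BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK to BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) implies the BCR_* symbols are now bit positions rather than single-bit field masks. A minimal sketch of what the reworked definitions presumably look like (the positions shown are assumptions; this diff only shows the call sites):

	/* Assumed shape of the reworked BCR definitions in ipa_reg.h;
	 * the bit positions below are illustrative, not taken from
	 * this diff.
	 */
	enum ipa_bcr_compat {
		BCR_CMDQ_L_LACK_ONE_ENTRY	= 0x0,
		BCR_TX_NOT_USING_BRESP		= 0x1,
		BCR_SUSPEND_L2_IRQ		= 0x3,
		BCR_HOLB_DROP_L2_IRQ		= 0x4,
		BCR_DUAL_TX			= 0x5,
	};

With bit positions, BIT(BCR_DUAL_TX) produces the same mask value the old BCR_DUAL_TX_FMASK encoded directly.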
diff --git a/drivers/net/ipa/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c
index 760c22bbdf70..42f2c88a92d4 100644
--- a/drivers/net/ipa/ipa_data-v3.5.1.c
+++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c
@@ -6,10 +6,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v3.5.1 */
enum ipa_resource_type {
@@ -92,7 +92,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -140,6 +142,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
@@ -175,10 +179,10 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
static const struct ipa_resource ipa_resource_src[] = {
[IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = {
.limits[IPA_RSRC_GROUP_SRC_LWA_DL] = {
- .min = 1, .max = 255,
+ .min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UL_DL] = {
- .min = 1, .max = 255,
+ .min = 1, .max = 63,
},
.limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = {
.min = 1, .max = 63,
@@ -403,11 +407,11 @@ static const struct ipa_power_data ipa_power_data = {
/* Configuration data for an SoC having IPA v3.5.1 */
const struct ipa_data ipa_data_v3_5_1 = {
.version = IPA_VERSION_3_5_1,
- .backward_compat = BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK |
- BCR_TX_NOT_USING_BRESP_FMASK |
- BCR_SUSPEND_L2_IRQ_FMASK |
- BCR_HOLB_DROP_L2_IRQ_FMASK |
- BCR_DUAL_TX_FMASK,
+ .backward_compat = BIT(BCR_CMDQ_L_LACK_ONE_ENTRY) |
+ BIT(BCR_TX_NOT_USING_BRESP) |
+ BIT(BCR_SUSPEND_L2_IRQ) |
+ BIT(BCR_HOLB_DROP_L2_IRQ) |
+ BIT(BCR_DUAL_TX),
.qsb_count = ARRAY_SIZE(ipa_qsb_data),
.qsb_data = ipa_qsb_data,
.endpoint_count = ARRAY_SIZE(ipa_gsi_endpoint_data),
diff --git a/drivers/net/ipa/ipa_data-v4.11.c b/drivers/net/ipa/data/ipa_data-v4.11.c
index fea91451a0c3..a204e439c23d 100644
--- a/drivers/net/ipa/ipa_data-v4.11.c
+++ b/drivers/net/ipa/data/ipa_data-v4.11.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.11 */
enum ipa_resource_type {
@@ -86,7 +86,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -133,6 +135,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 32768,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.2.c b/drivers/net/ipa/data/ipa_data-v4.2.c
index 2a231e79d5e1..04f574fe006f 100644
--- a/drivers/net/ipa/ipa_data-v4.2.c
+++ b/drivers/net/ipa/data/ipa_data-v4.2.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.2 */
enum ipa_resource_type {
@@ -82,7 +82,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -130,6 +132,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/ipa_data-v4.5.c b/drivers/net/ipa/data/ipa_data-v4.5.c
index e62ab9c3ac67..684239e71f46 100644
--- a/drivers/net/ipa/ipa_data-v4.5.c
+++ b/drivers/net/ipa/data/ipa_data-v4.5.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.5 */
enum ipa_resource_type {
@@ -95,7 +95,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -142,6 +144,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
@@ -420,15 +424,10 @@ static const struct ipa_mem_data ipa_mem_data = {
/* Interconnect rates are in 1000 byte/second units */
static const struct ipa_interconnect_data ipa_interconnect_data[] = {
{
- .name = "memory-a",
+ .name = "memory",
.peak_bandwidth = 600000, /* 600 MBps */
.average_bandwidth = 150000, /* 150 MBps */
},
- {
- .name = "memory-b",
- .peak_bandwidth = 1804000, /* 1.804 GBps */
- .average_bandwidth = 150000, /* 150 MBps */
- },
/* Average rate is unused for the next two interconnects */
{
.name = "imem",
diff --git a/drivers/net/ipa/ipa_data-v4.9.c b/drivers/net/ipa/data/ipa_data-v4.9.c
index 2421b5abb5d4..2333e15f9533 100644
--- a/drivers/net/ipa/ipa_data-v4.9.c
+++ b/drivers/net/ipa/data/ipa_data-v4.9.c
@@ -4,10 +4,10 @@
#include <linux/log2.h>
-#include "gsi.h"
-#include "ipa_data.h"
-#include "ipa_endpoint.h"
-#include "ipa_mem.h"
+#include "../gsi.h"
+#include "../ipa_data.h"
+#include "../ipa_endpoint.h"
+#include "../ipa_mem.h"
/** enum ipa_resource_type - IPA resource types for an SoC having IPA v4.9 */
enum ipa_resource_type {
@@ -87,7 +87,9 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.aggregation = true,
.status_enable = true,
.rx = {
+ .buffer_size = 8192,
.pad_align = ilog2(sizeof(u32)),
+ .aggr_time_limit = 500,
},
},
},
@@ -134,6 +136,8 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = {
.qmap = true,
.aggregation = true,
.rx = {
+ .buffer_size = 8192,
+ .aggr_time_limit = 500,
.aggr_close_eof = true,
},
},
diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c
index a2fcdb1abdb9..bea2da1c4c51 100644
--- a/drivers/net/ipa/gsi.c
+++ b/drivers/net/ipa/gsi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -56,9 +56,9 @@
* element can also contain an immediate command, requesting the IPA perform
* actions other than data transfer.
*
- * Each TRE refers to a block of data--also located DRAM. After writing one
- * or more TREs to a channel, the writer (either the IPA or an EE) writes a
- * doorbell register to inform the receiving side how many elements have
+ * Each TRE refers to a block of data--also located in DRAM. After writing
+ * one or more TREs to a channel, the writer (either the IPA or an EE) writes
+ * a doorbell register to inform the receiving side how many elements have
* been written.
*
* Each channel has a GSI "event ring" associated with it. An event ring
@@ -93,6 +93,7 @@
#define GSI_CHANNEL_STOP_RETRIES 10
#define GSI_CHANNEL_MODEM_HALT_RETRIES 10
+#define GSI_CHANNEL_MODEM_FLOW_RETRIES 5 /* disable flow control only */
#define GSI_MHI_EVENT_ID_START 10 /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END 16 /* Last reserved event id */
@@ -339,10 +340,10 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
* completion to be signaled. Returns true if the command completes
* or false if it times out.
*/
-static bool
-gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
+static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+ struct completion *completion = &gsi->completion;
reinit_completion(completion);
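After this hunk the helper is self-contained: command completion now always uses the single gsi->completion. A sketch of the full resulting function, assuming the unchanged tail (not shown in this hunk) still writes the command register and performs the timed wait the comment above describes:

	static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
	{
		unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
		struct completion *completion = &gsi->completion;

		reinit_completion(completion);

		/* Writing the command register starts the operation; the
		 * completion is signaled from the IRQ handler when done.
		 */
		iowrite32(val, gsi->virt + reg);

		/* Nonzero means the completion fired before the timeout */
		return !!wait_for_completion_timeout(completion, timeout);
	}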
@@ -366,8 +367,6 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
enum gsi_evt_cmd_opcode opcode)
{
- struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- struct completion *completion = &evt_ring->completion;
struct device *dev = gsi->dev;
bool timeout;
u32 val;
@@ -378,7 +377,7 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
gsi_irq_ev_ctrl_disable(gsi);
@@ -478,7 +477,6 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
- struct completion *completion = &channel->completion;
u32 channel_id = gsi_channel_id(channel);
struct gsi *gsi = channel->gsi;
struct device *dev = gsi->dev;
@@ -490,7 +488,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
val = u32_encode_bits(channel_id, CH_CHID_FMASK);
val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
- timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
gsi_irq_ch_ctrl_disable(gsi);
@@ -667,7 +665,8 @@ static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
- size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
+ struct gsi_ring *ring = &evt_ring->ring;
+ size_t size;
u32 val;
/* We program all event rings as GPI type/protocol */
@@ -676,6 +675,7 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));
+ size = ring->count * GSI_RING_ELEMENT_SIZE;
val = ev_r_length_encoded(gsi->version, size);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));
@@ -683,9 +683,9 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
* high-order 32 bits of the address of the event ring,
* respectively.
*/
- val = lower_32_bits(evt_ring->ring.addr);
+ val = lower_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
- val = upper_32_bits(evt_ring->ring.addr);
+ val = upper_32_bits(ring->addr);
iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));
/* Enable interrupt moderation by setting the moderation delay */
@@ -702,48 +702,40 @@ static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));
- /* Finally, tell the hardware we've completed event 0 (arbitrary) */
- gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
+ /* Finally, tell the hardware our "last processed" event (arbitrary) */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, ring->index);
}
/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
- const struct list_head *list;
+ u32 pending_id = trans_info->pending_id;
struct gsi_trans *trans;
-
- spin_lock_bh(&trans_info->spinlock);
-
- /* There is a small chance a TX transaction got allocated just
- * before we disabled transmits, so check for that.
- */
- if (channel->toward_ipa) {
- list = &trans_info->alloc;
- if (!list_empty(list))
- goto done;
- list = &trans_info->pending;
- if (!list_empty(list))
- goto done;
+ u16 trans_id;
+
+ if (channel->toward_ipa && pending_id != trans_info->free_id) {
+ /* There is a small chance a TX transaction got allocated
+ * just before we disabled transmits, so check for that.
+ * The last allocated, committed, or pending transaction
+ * precedes the first free transaction.
+ */
+ trans_id = trans_info->free_id - 1;
+ } else if (trans_info->polled_id != pending_id) {
+ /* Otherwise (TX or RX) we want to wait for anything that
+ * has completed, or has been polled but not released yet.
+ *
+ * The last completed or polled transaction precedes the
+ * first pending transaction.
+ */
+ trans_id = pending_id - 1;
+ } else {
+ return NULL;
}
- /* Otherwise (TX or RX) we want to wait for anything that
- * has completed, or has been polled but not released yet.
- */
- list = &trans_info->complete;
- if (!list_empty(list))
- goto done;
- list = &trans_info->polled;
- if (list_empty(list))
- list = NULL;
-done:
- trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;
-
/* Caller will wait for this, so take a reference */
- if (trans)
- refcount_inc(&trans->refcount);
-
- spin_unlock_bh(&trans_info->spinlock);
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ refcount_inc(&trans->refcount);
return trans;
}
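The list-based bookkeeping this replaces is superseded by free-running 16-bit transaction IDs: each lifecycle state occupies a contiguous ID range, and an ID selects its slot in the transaction array by a modulo (as the function above does inline). A sketch of the invariant being relied on; trans_for_id() is a hypothetical helper, not part of the patch:

	/* Transactions advance through states in order:
	 *	allocated -> committed -> pending -> completed -> polled -> free
	 * Treating the IDs as monotonically increasing counters:
	 *	polled_id <= completed_id <= pending_id <= committed_id
	 *		  <= allocated_id <= free_id
	 * so IDs in [pending_id, committed_id) are pending, and
	 * free_id - 1 is the most recently allocated transaction.
	 */
	static struct gsi_trans *
	trans_for_id(struct gsi_channel *channel, u16 trans_id)
	{
		return &channel->trans_info.trans[trans_id % channel->tre_count];
	}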
@@ -772,9 +764,6 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
u32 wrr_weight = 0;
u32 val;
- /* Arbitrarily pick TRE 0 as the first channel element to use */
- channel->tre_ring.index = 0;
-
/* We program all channels as GPI type/protocol */
val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
if (channel->toward_ipa)
@@ -825,7 +814,7 @@ static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
/* Now update the scratch registers for GPI protocol */
gpi = &scr.gpi;
- gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
+ gpi->max_outstanding_tre = channel->trans_tre_max *
GSI_RING_ELEMENT_SIZE;
gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;
@@ -951,6 +940,8 @@ void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
gsi_channel_reset_command(channel);
+ /* Hardware assumes this is 0 following reset */
+ channel->tre_ring.index = 0;
gsi_channel_program(channel, doorbell);
gsi_channel_trans_cancel_pending(channel);
@@ -993,75 +984,66 @@ void gsi_resume(struct gsi *gsi)
enable_irq(gsi->irq);
}
-/**
- * gsi_channel_tx_queued() - Report queued TX transfers for a channel
- * @channel: Channel for which to report
- *
- * Report to the network stack the number of bytes and transactions that
- * have been queued to hardware since last call. This and the next function
- * supply information used by the network stack for throttling.
- *
- * For each channel we track the number of transactions used and bytes of
- * data those transactions represent. We also track what those values are
- * each time this function is called. Subtracting the two tells us
- * the number of bytes and transactions that have been added between
- * successive calls.
- *
- * Calling this each time we ring the channel doorbell allows us to
- * provide accurate information to the network stack about how much
- * work we've given the hardware at any point in time.
- */
-void gsi_channel_tx_queued(struct gsi_channel *channel)
+void gsi_trans_tx_committed(struct gsi_trans *trans)
{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+
+ channel->trans_count++;
+ channel->byte_count += trans->len;
+
+ trans->trans_count = channel->trans_count;
+ trans->byte_count = channel->byte_count;
+}
+
+void gsi_trans_tx_queued(struct gsi_trans *trans)
+{
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
u32 trans_count;
u32 byte_count;
+ channel = &gsi->channel[channel_id];
+
byte_count = channel->byte_count - channel->queued_byte_count;
trans_count = channel->trans_count - channel->queued_trans_count;
channel->queued_byte_count = channel->byte_count;
channel->queued_trans_count = channel->trans_count;
- ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_queued(gsi, channel_id, trans_count, byte_count);
}
/**
- * gsi_channel_tx_update() - Report completed TX transfers
- * @channel: Channel that has completed transmitting packets
- * @trans: Last transation known to be complete
- *
- * Compute the number of transactions and bytes that have been transferred
- * over a TX channel since the given transaction was committed. Report this
- * information to the network stack.
+ * gsi_trans_tx_completed() - Report completed TX transactions
+ * @trans: TX channel transaction that has completed
*
- * At the time a transaction is committed, we record its channel's
- * committed transaction and byte counts *in the transaction*.
- * Completions are signaled by the hardware with an interrupt, and
- * we can determine the latest completed transaction at that time.
+ * Report that a transaction on a TX channel has completed. At the time a
+ * transaction is committed, we record *in the transaction* its channel's
+ * committed transaction and byte counts. Transactions are completed in
+ * order, and the difference between the channel's byte/transaction count
+ * when the transaction was committed and when it completes tells us
+ * exactly how much data has been transferred while the transaction was
+ * pending.
*
- * The difference between the byte/transaction count recorded in
- * the transaction and the count last time we recorded a completion
- * tells us exactly how much data has been transferred between
- * completions.
- *
- * Calling this each time we learn of a newly-completed transaction
- * allows us to provide accurate information to the network stack
- * about how much work has been completed by the hardware at a given
- * point in time.
+ * We report this information to the network stack, which uses it to manage
+ * the rate at which data is sent to hardware.
*/
-static void
-gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
+static void gsi_trans_tx_completed(struct gsi_trans *trans)
{
- u64 byte_count = trans->byte_count + trans->len;
- u64 trans_count = trans->trans_count + 1;
+ u32 channel_id = trans->channel_id;
+ struct gsi *gsi = trans->gsi;
+ struct gsi_channel *channel;
+ u32 trans_count;
+ u32 byte_count;
+
+ channel = &gsi->channel[channel_id];
+ trans_count = trans->trans_count - channel->compl_trans_count;
+ byte_count = trans->byte_count - channel->compl_byte_count;
- byte_count -= channel->compl_byte_count;
- channel->compl_byte_count += byte_count;
- trans_count -= channel->compl_trans_count;
channel->compl_trans_count += trans_count;
+ channel->compl_byte_count += byte_count;
- ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
- trans_count, byte_count);
+ ipa_gsi_channel_tx_completed(gsi, channel_id, trans_count, byte_count);
}
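A worked example of the differencing above: if a transaction was committed when the channel had seen 100 transactions and 64000 bytes (values recorded in trans->trans_count and trans->byte_count by gsi_trans_tx_committed()), and at completion time compl_trans_count is 97 and compl_byte_count is 60000, then this completion reports 3 transactions and 4000 bytes up the stack and advances the compl_* counters to 100 and 64000.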
/* Channel control interrupt handler */
@@ -1074,13 +1056,10 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
while (channel_mask) {
u32 channel_id = __ffs(channel_mask);
- struct gsi_channel *channel;
channel_mask ^= BIT(channel_id);
- channel = &gsi->channel[channel_id];
-
- complete(&channel->completion);
+ complete(&gsi->completion);
}
}
@@ -1094,13 +1073,10 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
while (event_mask) {
u32 evt_ring_id = __ffs(event_mask);
- struct gsi_evt_ring *evt_ring;
event_mask ^= BIT(evt_ring_id);
- evt_ring = &gsi->evt_ring[evt_ring_id];
-
- complete(&evt_ring->completion);
+ complete(&gsi->completion);
}
}
@@ -1110,7 +1086,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
if (code == GSI_OUT_OF_RESOURCES) {
dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
- complete(&gsi->channel[channel_id].completion);
+ complete(&gsi->completion);
return;
}
@@ -1127,7 +1103,7 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
u32 channel_id = gsi_channel_id(evt_ring->channel);
- complete(&evt_ring->completion);
+ complete(&gsi->completion);
dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
channel_id);
return;
@@ -1171,26 +1147,31 @@ static void gsi_isr_gp_int1(struct gsi *gsi)
u32 result;
u32 val;
- /* This interrupt is used to handle completions of the two GENERIC
- * GSI commands. We use these to allocate and halt channels on
- * the modem's behalf due to a hardware quirk on IPA v4.2. Once
- * allocated, the modem "owns" these channels, and as a result we
- * have no way of knowing the channel's state at any given time.
+ /* This interrupt is used to handle completions of GENERIC GSI
+ * commands. We use these to allocate and halt channels on the
+ * modem's behalf due to a hardware quirk on IPA v4.2. The modem
+ * "owns" channels even when the AP allocates them, and have no
+ * way of knowing whether a modem channel's state has been changed.
+ *
+ * We also use GENERIC commands to enable/disable channel flow
+ * control for IPA v4.2+.
*
* It is recommended that we halt the modem channels we allocated
* when shutting down, but it's possible the channel isn't running
* at the time we issue the HALT command. We'll get an error in
* that case, but it's harmless (the channel is already halted).
+ * Similarly, we could get an error back when updating flow control
+ * on a channel because it's not in the proper state.
*
- * For this reason, we silently ignore a CHANNEL_NOT_RUNNING error
- * if we receive it.
+ * In either case, we silently ignore an INCORRECT_CHANNEL_STATE
+ * error if we receive it.
*/
val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);
switch (result) {
case GENERIC_EE_SUCCESS:
- case GENERIC_EE_CHANNEL_NOT_RUNNING:
+ case GENERIC_EE_INCORRECT_CHANNEL_STATE:
gsi->result = 0;
break;
@@ -1330,60 +1311,73 @@ static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
}
/* Return the transaction associated with a transfer completion event */
-static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
- struct gsi_event *event)
+static struct gsi_trans *
+gsi_event_trans(struct gsi *gsi, struct gsi_event *event)
{
+ u32 channel_id = event->chid;
+ struct gsi_channel *channel;
+ struct gsi_trans *trans;
u32 tre_offset;
u32 tre_index;
+ channel = &gsi->channel[channel_id];
+ if (WARN(!channel->gsi, "event has bad channel %u\n", channel_id))
+ return NULL;
+
/* Event xfer_ptr records the TRE it's associated with */
tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);
- return gsi_channel_trans_mapped(channel, tre_index);
+ trans = gsi_channel_trans_mapped(channel, tre_index);
+
+ if (WARN(!trans, "channel %u event with no transaction\n", channel_id))
+ return NULL;
+
+ return trans;
}
/**
- * gsi_evt_ring_rx_update() - Record lengths of received data
- * @evt_ring: Event ring associated with channel that received packets
- * @index: Event index in ring reported by hardware
+ * gsi_evt_ring_update() - Update transaction state from hardware
+ * @gsi: GSI pointer
+ * @evt_ring_id: Event ring ID
+ * @index: Event index in ring reported by hardware
*
* Events for RX channels contain the actual number of bytes received into
* the buffer. Every event has a transaction associated with it, and here
* we update transactions to record their actual received lengths.
*
+ * When an event for a TX channel arrives we use information in the
+ * transaction to report the number of requests and bytes that have
+ * been transferred.
+ *
* This function is called whenever we learn that the GSI hardware has filled
* new events since the last time we checked. The ring's index field tells
* the first entry in need of processing. The index provided is the
* first *unfilled* event in the ring (following the last filled one).
*
* Events are sequential within the event ring, and transactions are
- * sequential within the transaction pool.
+ * sequential within the transaction array.
*
* Note that @index always refers to an element *within* the event ring.
*/
-static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
+static void gsi_evt_ring_update(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
- struct gsi_channel *channel = evt_ring->channel;
+ struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
struct gsi_ring *ring = &evt_ring->ring;
- struct gsi_trans_info *trans_info;
struct gsi_event *event_done;
struct gsi_event *event;
- struct gsi_trans *trans;
- u32 byte_count = 0;
- u32 old_index;
u32 event_avail;
+ u32 old_index;
- trans_info = &channel->trans_info;
-
- /* We'll start with the oldest un-processed event. RX channels
- * replenish receive buffers in single-TRE transactions, so we
- * can just map that event to its transaction. Transactions
- * associated with completion events are consecutive.
+ /* Starting with the oldest un-processed event, determine which
+ * transaction (and which channel) is associated with the event.
+ * For RX channels, update each completed transaction with the
+ * number of bytes that were actually received. For TX channels
+ * associated with a network device, report to the network stack
+ * the number of transfers and bytes this completion represents.
*/
old_index = ring->index;
event = gsi_ring_virt(ring, old_index);
- trans = gsi_event_trans(channel, event);
/* Compute the number of events to process before we wrap,
* and determine when we'll be done processing events.
@@ -1391,20 +1385,28 @@ static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
event_avail = ring->count - old_index % ring->count;
event_done = gsi_ring_virt(ring, index);
do {
- trans->len = __le16_to_cpu(event->len);
- byte_count += trans->len;
+ struct gsi_trans *trans;
+
+ trans = gsi_event_trans(gsi, event);
+ if (!trans)
+ return;
+
+ if (trans->direction == DMA_FROM_DEVICE)
+ trans->len = __le16_to_cpu(event->len);
+ else
+ gsi_trans_tx_completed(trans);
+
+ gsi_trans_move_complete(trans);
/* Move on to the next event and transaction */
if (--event_avail)
event++;
else
event = gsi_ring_virt(ring, 0);
- trans = gsi_trans_pool_next(&trans_info->pool, trans);
} while (event != event_done);
- /* We record RX bytes when they are received */
- channel->byte_count += byte_count;
- channel->trans_count++;
+ /* Tell the hardware we've handled these events */
+ gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
}
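A worked example of the wrap handling in the loop above: with a 16-entry event ring, old_index 14, and index 2, event_avail is 16 - 14 % 16 = 2, so entries 14 and 15 are processed, the walk wraps back to entry 0, and entries 0 and 1 are processed before event reaches event_done (entry 2). Four events are handled in total, after which the single doorbell write acknowledges them all.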
/* Initialize a ring, including allocating DMA memory for its entries */
@@ -1424,6 +1426,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
ring->addr = addr;
ring->count = count;
+ ring->index = 0;
return 0;
}
@@ -1471,8 +1474,8 @@ void gsi_channel_doorbell(struct gsi_channel *channel)
iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
}
-/* Consult hardware, move any newly completed transactions to completed list */
-static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
+/* Consult hardware, move newly completed transactions to completed state */
+void gsi_channel_update(struct gsi_channel *channel)
{
u32 evt_ring_id = channel->evt_ring_id;
struct gsi *gsi = channel->gsi;
@@ -1491,33 +1494,19 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
if (index == ring->index % ring->count)
- return NULL;
+ return;
- /* Get the transaction for the latest completed event. Take a
- * reference to keep it from completing before we give the events
- * for this and previous transactions back to the hardware.
- */
- trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
- refcount_inc(&trans->refcount);
+ /* Get the transaction for the latest completed event. */
+ trans = gsi_event_trans(gsi, gsi_ring_virt(ring, index - 1));
+ if (!trans)
+ return;
/* For RX channels, update each completed transaction with the number
* of bytes that were actually received. For TX channels, report
* the number of transactions and bytes this completion represents
* up the network stack.
*/
- if (channel->toward_ipa)
- gsi_channel_tx_update(channel, trans);
- else
- gsi_evt_ring_rx_update(evt_ring, index);
-
- gsi_trans_move_complete(trans);
-
- /* Tell the hardware we've handled these events */
- gsi_evt_ring_doorbell(channel->gsi, channel->evt_ring_id, index);
-
- gsi_trans_free(trans);
-
- return gsi_channel_trans_complete(channel);
+ gsi_evt_ring_update(gsi, evt_ring_id, index);
}
/**
@@ -1526,21 +1515,18 @@ static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
*
* Return: Transaction pointer, or null if none are available
*
- * This function returns the first entry on a channel's completed transaction
- * list. If that list is empty, the hardware is consulted to determine
- * whether any new transactions have completed. If so, they're moved to the
- * completed list and the new first entry is returned. If there are no more
- * completed transactions, a null pointer is returned.
+ * This function returns the first of a channel's completed transactions.
+ * If no transactions are in completed state, the hardware is consulted to
+ * determine whether any new transactions have completed. If so, they're
+ * moved to completed state and the first such transaction is returned.
+ * If there are no more completed transactions, a null pointer is returned.
*/
static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
{
struct gsi_trans *trans;
- /* Get the first transaction from the completed list */
+ /* Get the first completed transaction */
trans = gsi_channel_trans_complete(channel);
- if (!trans) /* List is empty; see if there's more to do */
- trans = gsi_channel_update(channel);
-
if (trans)
gsi_trans_move_polled(trans);
@@ -1617,11 +1603,11 @@ static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
gsi_channel_program(channel, true);
if (channel->toward_ipa)
- netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
- gsi_channel_poll, NAPI_POLL_WEIGHT);
+ netif_napi_add_tx(&gsi->dummy_dev, &channel->napi,
+ gsi_channel_poll);
else
netif_napi_add(&gsi->dummy_dev, &channel->napi,
- gsi_channel_poll, NAPI_POLL_WEIGHT);
+ gsi_channel_poll);
return 0;
@@ -1648,19 +1634,25 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
}
+/* We use generic commands only to operate on modem channels. We don't have
+ * the ability to determine channel state for a modem channel, so we simply
+ * issue the command and wait for it to complete.
+ */
static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
- enum gsi_generic_cmd_opcode opcode)
+ enum gsi_generic_cmd_opcode opcode,
+ u8 params)
{
- struct completion *completion = &gsi->completion;
bool timeout;
u32 val;
- /* The error global interrupt type is always enabled (until we
- * teardown), so we won't change that. A generic EE command
- * completes with a GSI global interrupt of type GP_INT1. We
- * only perform one generic command at a time (to allocate or
- * halt a modem channel) and only from this function. So we
- * enable the GP_INT1 IRQ type here while we're expecting it.
+ /* The error global interrupt type is always enabled (until we tear
+ * down), so we will keep it enabled.
+ *
+ * A generic EE command completes with a GSI global interrupt of
+ * type GP_INT1. We only perform one generic command at a time
+ * (to allocate, halt, or enable/disable flow control on a modem
+ * channel), and only from this function. So we enable the GP_INT1
+ * IRQ type here, and disable it again after the command completes.
*/
val = BIT(ERROR_INT) | BIT(GP_INT1);
iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@@ -1674,8 +1666,9 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
+ val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
- timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+ timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
/* Disable the GP_INT1 IRQ type again */
iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
@@ -1692,7 +1685,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
{
return gsi_generic_command(gsi, channel_id,
- GSI_GENERIC_ALLOCATE_CHANNEL);
+ GSI_GENERIC_ALLOCATE_CHANNEL, 0);
}
static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
@@ -1702,7 +1695,7 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
do
ret = gsi_generic_command(gsi, channel_id,
- GSI_GENERIC_HALT_CHANNEL);
+ GSI_GENERIC_HALT_CHANNEL, 0);
while (ret == -EAGAIN && retries--);
if (ret)
@@ -1710,6 +1703,32 @@ static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
ret, channel_id);
}
+/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
+void
+gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
+{
+ u32 retries = 0;
+ u32 command;
+ int ret;
+
+ command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
+ : GSI_GENERIC_DISABLE_FLOW_CONTROL;
+ /* Disabling flow control on IPA v4.11+ can return -EAGAIN if an enable
+ * is underway. In this case we need to retry the command.
+ */
+ if (!enable && gsi->version >= IPA_VERSION_4_11)
+ retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
+
+ do
+ ret = gsi_generic_command(gsi, channel_id, command, 0);
+ while (ret == -EAGAIN && retries--);
+
+ if (ret)
+ dev_err(gsi->dev,
+ "error %d %sabling mode channel %u flow control\n",
+ ret, enable ? "en" : "dis", channel_id);
+}
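A hedged sketch of how a caller might use the new entry point; the wrapper name and the placement of the real call sites (presumably the IPA endpoint/modem code) are assumptions, and only the gsi_modem_channel_flow_control() signature comes from this patch:

	/* Hypothetical caller: keep the modem from feeding a TX channel
	 * while it is being stopped, then let traffic flow again later.
	 */
	static void modem_tx_quiesce(struct gsi *gsi, u32 channel_id, bool quiesce)
	{
		/* The flow control commands only exist on IPA v4.2+ */
		if (gsi->version < IPA_VERSION_4_2)
			return;

		gsi_modem_channel_flow_control(gsi, channel_id, quiesce);
	}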
+
/* Setup function for channels */
static int gsi_channel_setup(struct gsi *gsi)
{
@@ -1975,21 +1994,10 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
gsi_evt_ring_id_free(gsi, evt_ring_id);
}
-/* Init function for event rings; there is no gsi_evt_ring_exit() */
-static void gsi_evt_ring_init(struct gsi *gsi)
-{
- u32 evt_ring_id = 0;
-
- gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
- gsi->ieob_enabled_bitmap = 0;
- do
- init_completion(&gsi->evt_ring[evt_ring_id].completion);
- while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
-}
-
-static bool gsi_channel_data_valid(struct gsi *gsi,
+static bool gsi_channel_data_valid(struct gsi *gsi, bool command,
const struct ipa_gsi_endpoint_data *data)
{
+ const struct gsi_channel_data *channel_data;
u32 channel_id = data->channel_id;
struct device *dev = gsi->dev;
@@ -2005,10 +2013,24 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
return false;
}
- if (!data->channel.tlv_count ||
- data->channel.tlv_count > GSI_TLV_MAX) {
+ if (command && !data->toward_ipa) {
+ dev_err(dev, "command channel %u is not TX\n", channel_id);
+ return false;
+ }
+
+ channel_data = &data->channel;
+
+ if (!channel_data->tlv_count ||
+ channel_data->tlv_count > GSI_TLV_MAX) {
dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
- channel_id, data->channel.tlv_count, GSI_TLV_MAX);
+ channel_id, channel_data->tlv_count, GSI_TLV_MAX);
+ return false;
+ }
+
+ if (command && IPA_COMMAND_TRANS_TRE_MAX > channel_data->tlv_count) {
+ dev_err(dev, "command TRE max too big for channel %u (%u > %u)\n",
+ channel_id, IPA_COMMAND_TRANS_TRE_MAX,
+ channel_data->tlv_count);
return false;
}
@@ -2017,22 +2039,22 @@ static bool gsi_channel_data_valid(struct gsi *gsi,
* gsi_channel_tre_max() is computed, tre_count has to be almost
* twice the TLV FIFO size to satisfy this requirement.
*/
- if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
+ if (channel_data->tre_count < 2 * channel_data->tlv_count - 1) {
dev_err(dev, "channel %u TLV count %u exceeds TRE count %u\n",
- channel_id, data->channel.tlv_count,
- data->channel.tre_count);
+ channel_id, channel_data->tlv_count,
+ channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.tre_count)) {
+ if (!is_power_of_2(channel_data->tre_count)) {
dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
- channel_id, data->channel.tre_count);
+ channel_id, channel_data->tre_count);
return false;
}
- if (!is_power_of_2(data->channel.event_count)) {
+ if (!is_power_of_2(channel_data->event_count)) {
dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
- channel_id, data->channel.event_count);
+ channel_id, channel_data->event_count);
return false;
}
@@ -2048,7 +2070,7 @@ static int gsi_channel_init_one(struct gsi *gsi,
u32 tre_count;
int ret;
- if (!gsi_channel_data_valid(gsi, data))
+ if (!gsi_channel_data_valid(gsi, command, data))
return -EINVAL;
/* Worst case we need an event for every outstanding TRE */
@@ -2066,10 +2088,9 @@ static int gsi_channel_init_one(struct gsi *gsi,
channel->gsi = gsi;
channel->toward_ipa = data->toward_ipa;
channel->command = command;
- channel->tlv_count = data->channel.tlv_count;
+ channel->trans_tre_max = data->channel.tlv_count;
channel->tre_count = tre_count;
channel->event_count = data->channel.event_count;
- init_completion(&channel->completion);
ret = gsi_channel_evt_ring_init(channel);
if (ret)
@@ -2129,7 +2150,8 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
/* IPA v4.2 requires the AP to allocate channels for the modem */
modem_alloc = gsi->version == IPA_VERSION_4_2;
- gsi_evt_ring_init(gsi); /* No matching exit required */
+ gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+ gsi->ieob_enabled_bitmap = 0;
/* The endpoint data array is indexed by endpoint name */
for (i = 0; i < count; i++) {
@@ -2281,13 +2303,5 @@ u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
struct gsi_channel *channel = &gsi->channel[channel_id];
/* Hardware limit is channel->tre_count - 1 */
- return channel->tre_count - (channel->tlv_count - 1);
-}
-
-/* Returns the maximum number of TREs in a single transaction for a channel */
-u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
-{
- struct gsi_channel *channel = &gsi->channel[channel_id];
-
- return channel->tlv_count;
+ return channel->tre_count - (channel->trans_tre_max - 1);
}
diff --git a/drivers/net/ipa/gsi.h b/drivers/net/ipa/gsi.h
index 88b80dc3db79..49dcadba4e0b 100644
--- a/drivers/net/ipa/gsi.h
+++ b/drivers/net/ipa/gsi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_H_
#define _GSI_H_
@@ -31,14 +31,6 @@ struct gsi_trans;
struct gsi_channel_data;
struct ipa_gsi_endpoint_data;
-/* Execution environment IDs */
-enum gsi_ee_id {
- GSI_EE_AP = 0x0,
- GSI_EE_MODEM = 0x1,
- GSI_EE_UC = 0x2,
- GSI_EE_TZ = 0x3,
-};
-
struct gsi_ring {
void *virt; /* ring array base address */
dma_addr_t addr; /* primarily low 32 bits used */
@@ -48,12 +40,13 @@ struct gsi_ring {
*
* A channel ring consists of TRE entries filled by the AP and passed
* to the hardware for processing. For a channel ring, the ring index
- * identifies the next unused entry to be filled by the AP.
+ * identifies the next unused entry to be filled by the AP. In this
+ * case the initial value is assumed by hardware to be 0.
*
* An event ring consists of event structures filled by the hardware
* and passed to the AP. For event rings, the ring index identifies
* the next ring entry that is not known to have been filled by the
- * hardware.
+ * hardware. The initial value used is arbitrary (so we use 0).
*/
u32 index;
};
@@ -81,17 +74,18 @@ struct gsi_trans_pool {
struct gsi_trans_info {
atomic_t tre_avail; /* TREs available for allocation */
- struct gsi_trans_pool pool; /* transaction pool */
- struct gsi_trans_pool sg_pool; /* scatterlist pool */
- struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
- struct gsi_trans_pool info_pool;/* command information pool */
+
+ u16 free_id; /* first free trans in array */
+ u16 allocated_id; /* first allocated transaction */
+ u16 committed_id; /* first committed transaction */
+ u16 pending_id; /* first pending transaction */
+ u16 completed_id; /* first completed transaction */
+ u16 polled_id; /* first polled transaction */
+ struct gsi_trans *trans; /* transaction array */
struct gsi_trans **map; /* TRE -> transaction map */
- spinlock_t spinlock; /* protects updates to the lists */
- struct list_head alloc; /* allocated, not committed */
- struct list_head pending; /* committed, awaiting completion */
- struct list_head complete; /* completed, awaiting poll */
- struct list_head polled; /* returned by gsi_channel_poll_one() */
+ struct gsi_trans_pool sg_pool; /* scatterlist pool */
+ struct gsi_trans_pool cmd_pool; /* command payload DMA pool */
};
/* Hardware values signifying the state of a channel */
@@ -101,6 +95,7 @@ enum gsi_channel_state {
GSI_CHANNEL_STATE_STARTED = 0x2,
GSI_CHANNEL_STATE_STOPPED = 0x3,
GSI_CHANNEL_STATE_STOP_IN_PROC = 0x4,
+ GSI_CHANNEL_STATE_FLOW_CONTROLLED = 0x5, /* IPA v4.2-v4.9 */
GSI_CHANNEL_STATE_ERROR = 0xf,
};
@@ -110,18 +105,16 @@ struct gsi_channel {
bool toward_ipa;
bool command; /* AP command TX channel or not */
- u8 tlv_count; /* # entries in TLV FIFO */
+ u8 trans_tre_max; /* max TREs in a transaction */
u16 tre_count;
u16 event_count;
- struct completion completion; /* signals channel command completion */
-
struct gsi_ring tre_ring;
u32 evt_ring_id;
+ /* The following counts are used only for TX endpoints */
u64 byte_count; /* total # bytes transferred */
u64 trans_count; /* total # transactions */
- /* The following counts are used only for TX endpoints */
u64 queued_byte_count; /* last reported queued byte count */
u64 queued_trans_count; /* ...and queued trans count */
u64 compl_byte_count; /* last reported completed byte count */
@@ -141,28 +134,27 @@ enum gsi_evt_ring_state {
struct gsi_evt_ring {
struct gsi_channel *channel;
- struct completion completion; /* signals event ring state changes */
struct gsi_ring ring;
};
struct gsi {
struct device *dev; /* Same as IPA device */
enum ipa_version version;
- struct net_device dummy_dev; /* needed for NAPI */
void __iomem *virt_raw; /* I/O mapped address range */
void __iomem *virt; /* Adjusted for most registers */
u32 irq;
u32 channel_count;
u32 evt_ring_count;
- struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
- struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
u32 event_bitmap; /* allocated event rings */
u32 modem_channel_bitmap; /* modem channels to allocate */
u32 type_enabled_bitmap; /* GSI IRQ types enabled */
u32 ieob_enabled_bitmap; /* IEOB IRQ enabled (event rings) */
- struct completion completion; /* for global EE commands */
int result; /* Negative errno (generic commands) */
+ struct completion completion; /* Signals GSI command completion */
struct mutex mutex; /* protects commands, programming */
+ struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
+ struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
+ struct net_device dummy_dev; /* needed for NAPI */
};
/**
@@ -187,20 +179,11 @@ void gsi_teardown(struct gsi *gsi);
* @gsi: GSI pointer
* @channel_id: Channel whose limit is to be returned
*
- * Return: The maximum number of TREs oustanding on the channel
+ * Return: The maximum number of TREs outstanding on the channel
*/
u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id);
/**
- * gsi_channel_trans_tre_max() - Maximum TREs in a single transaction
- * @gsi: GSI pointer
- * @channel_id: Channel whose limit is to be returned
- *
- * Return: The maximum TRE count per transaction on the channel
- */
-u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id);
-
-/**
* gsi_channel_start() - Start an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to start
@@ -219,6 +202,15 @@ int gsi_channel_start(struct gsi *gsi, u32 channel_id);
int gsi_channel_stop(struct gsi *gsi, u32 channel_id);
/**
+ * gsi_modem_channel_flow_control() - Set channel flow control state (IPA v4.2+)
+ * @gsi: GSI pointer returned by gsi_setup()
+ * @channel_id: Modem TX channel to control
+ * @enable: Whether to enable flow control (i.e., prevent flow)
+ */
+void gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id,
+ bool enable);
+
+/**
* gsi_channel_reset() - Reset an allocated GSI channel
* @gsi: GSI pointer
* @channel_id: Channel to be reset
diff --git a/drivers/net/ipa/gsi_private.h b/drivers/net/ipa/gsi_private.h
index ea333a244cf5..c65f7c5cdc8d 100644
--- a/drivers/net/ipa/gsi_private.h
+++ b/drivers/net/ipa/gsi_private.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_PRIVATE_H_
#define _GSI_PRIVATE_H_
@@ -16,18 +16,15 @@ struct gsi_channel;
#define GSI_RING_ELEMENT_SIZE 16 /* bytes; must be a power of 2 */
-/* Return the entry that follows one provided in a transaction pool */
-void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element);
-
/**
* gsi_trans_move_complete() - Mark a GSI transaction completed
- * @trans: Transaction to commit
+ * @trans: Transaction whose state is to be updated
*/
void gsi_trans_move_complete(struct gsi_trans *trans);
/**
* gsi_trans_move_polled() - Mark a transaction polled
- * @trans: Transaction to update
+ * @trans: Transaction whose state is to be updated
*/
void gsi_trans_move_polled(struct gsi_trans *trans);
@@ -97,6 +94,14 @@ void gsi_channel_trans_exit(struct gsi_channel *channel);
*/
void gsi_channel_doorbell(struct gsi_channel *channel);
+/* gsi_channel_update() - Update knowledge of channel hardware state
+ * @channel: Channel to be updated
+ *
+ * Consult hardware, change the state of any newly-completed transactions
+ * on a channel.
+ */
+void gsi_channel_update(struct gsi_channel *channel);
+
/**
* gsi_ring_virt() - Return virtual address for a ring entry
* @ring: Ring whose address is to be translated
@@ -105,14 +110,21 @@ void gsi_channel_doorbell(struct gsi_channel *channel);
void *gsi_ring_virt(struct gsi_ring *ring, u32 index);
/**
- * gsi_channel_tx_queued() - Report the number of bytes queued to hardware
- * @channel: Channel whose bytes have been queued
+ * gsi_trans_tx_committed() - Record bytes committed for transmit
+ * @trans: TX endpoint transaction being committed
+ *
+ * Report that a TX transaction has been committed. It updates some
+ * statistics used to manage transmit rates.
+ */
+void gsi_trans_tx_committed(struct gsi_trans *trans);
+
+/**
+ * gsi_trans_tx_queued() - Report a queued TX channel transaction
+ * @trans: Transaction being passed to hardware
*
- * This arranges for the the number of transactions and bytes for
- * transfer that have been queued to hardware to be reported. It
- * passes this information up the network stack so it can be used to
- * throttle transmissions.
+ * Report to the network stack that a TX transaction is being supplied
+ * to the hardware.
*/
-void gsi_channel_tx_queued(struct gsi_channel *channel);
+void gsi_trans_tx_queued(struct gsi_trans *trans);
#endif /* _GSI_PRIVATE_H_ */
diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h
index bf9593d9eaea..3763359f208f 100644
--- a/drivers/net/ipa/gsi_reg.h
+++ b/drivers/net/ipa/gsi_reg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _GSI_REG_H_
#define _GSI_REG_H_
@@ -55,14 +55,10 @@
/* The inter-EE IRQ registers are relative to gsi->virt_raw (IPA v3.5+) */
#define GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET \
- GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_CH_IRQ_MSK_OFFSET(ee) \
- (0x0000c020 + 0x1000 * (ee))
+ (0x0000c020 + 0x1000 * GSI_EE_AP)
#define GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET \
- GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_INTER_EE_N_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
- (0x0000c024 + 0x1000 * (ee))
+ (0x0000c024 + 0x1000 * GSI_EE_AP)
/* All other register offsets are relative to gsi->virt */
@@ -81,9 +77,7 @@ enum gsi_channel_type {
};
#define GSI_CH_C_CNTXT_0_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_0_OFFSET(ch, ee) \
- (0x0001c000 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c000 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define CHTYPE_PROTOCOL_FMASK GENMASK(2, 0)
#define CHTYPE_DIR_FMASK GENMASK(3, 3)
#define EE_FMASK GENMASK(7, 4)
@@ -112,9 +106,7 @@ chtype_protocol_encoded(enum ipa_version version, enum gsi_channel_type type)
}
#define GSI_CH_C_CNTXT_1_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_1_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_1_OFFSET(ch, ee) \
- (0x0001c004 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c004 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
/* Encoded value for CH_C_CNTXT_1 register R_LENGTH field */
static inline u32 r_length_encoded(enum ipa_version version, u32 length)
@@ -125,19 +117,13 @@ static inline u32 r_length_encoded(enum ipa_version version, u32 length)
}
#define GSI_CH_C_CNTXT_2_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_2_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_2_OFFSET(ch, ee) \
- (0x0001c008 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c008 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_CNTXT_3_OFFSET(ch) \
- GSI_EE_N_CH_C_CNTXT_3_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_CNTXT_3_OFFSET(ch, ee) \
- (0x0001c00c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c00c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_QOS_OFFSET(ch) \
- GSI_EE_N_CH_C_QOS_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_QOS_OFFSET(ch, ee) \
- (0x0001c05c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c05c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define WRR_WEIGHT_FMASK GENMASK(3, 0)
#define MAX_PREFETCH_FMASK GENMASK(8, 8)
#define USE_DB_ENG_FMASK GENMASK(9, 9)
@@ -158,29 +144,19 @@ enum gsi_prefetch_mode {
};
#define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_0_OFFSET(ch, ee) \
- (0x0001c060 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c060 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_1_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_1_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_1_OFFSET(ch, ee) \
- (0x0001c064 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c064 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_2_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_2_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_2_OFFSET(ch, ee) \
- (0x0001c068 + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c068 + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_CH_C_SCRATCH_3_OFFSET(ch) \
- GSI_EE_N_CH_C_SCRATCH_3_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_SCRATCH_3_OFFSET(ch, ee) \
- (0x0001c06c + 0x4000 * (ee) + 0x80 * (ch))
+ (0x0001c06c + 0x4000 * GSI_EE_AP + 0x80 * (ch))
#define GSI_EV_CH_E_CNTXT_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_0_OFFSET(ev, ee) \
- (0x0001d000 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d000 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
/* enum gsi_channel_type defines EV_CHTYPE field values in EV_CH_E_CNTXT_0 */
#define EV_CHTYPE_FMASK GENMASK(3, 0)
#define EV_EE_FMASK GENMASK(7, 4)
@@ -190,9 +166,7 @@ enum gsi_prefetch_mode {
#define EV_ELEMENT_SIZE_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_1_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_1_OFFSET(ev, ee) \
- (0x0001d004 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d004 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
/* Encoded value for EV_CH_C_CNTXT_1 register EV_R_LENGTH field */
static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
{
@@ -202,83 +176,53 @@ static inline u32 ev_r_length_encoded(enum ipa_version version, u32 length)
}
#define GSI_EV_CH_E_CNTXT_2_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_2_OFFSET(ev, ee) \
- (0x0001d008 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d008 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_3_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_3_OFFSET(ev, ee) \
- (0x0001d00c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d00c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_4_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_4_OFFSET(ev, ee) \
- (0x0001d010 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d010 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_8_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_8_OFFSET(ev, ee) \
- (0x0001d020 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d020 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define MODT_FMASK GENMASK(15, 0)
#define MODC_FMASK GENMASK(23, 16)
#define MOD_CNT_FMASK GENMASK(31, 24)
#define GSI_EV_CH_E_CNTXT_9_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_9_OFFSET(ev, ee) \
- (0x0001d024 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d024 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_10_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_10_OFFSET(ev, ee) \
- (0x0001d028 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d028 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_11_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_11_OFFSET(ev, ee) \
- (0x0001d02c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d02c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_12_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_12_OFFSET(ev, ee) \
- (0x0001d030 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d030 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_CNTXT_13_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_CNTXT_13_OFFSET(ev, ee) \
- (0x0001d034 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d034 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_SCRATCH_0_OFFSET(ev, ee) \
- (0x0001d048 + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d048 + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_EV_CH_E_SCRATCH_1_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_SCRATCH_1_OFFSET(ev, ee) \
- (0x0001d04c + 0x4000 * (ee) + 0x80 * (ev))
+ (0x0001d04c + 0x4000 * GSI_EE_AP + 0x80 * (ev))
#define GSI_CH_C_DOORBELL_0_OFFSET(ch) \
- GSI_EE_N_CH_C_DOORBELL_0_OFFSET((ch), GSI_EE_AP)
-#define GSI_EE_N_CH_C_DOORBELL_0_OFFSET(ch, ee) \
- (0x0001e000 + 0x4000 * (ee) + 0x08 * (ch))
+ (0x0001e000 + 0x4000 * GSI_EE_AP + 0x08 * (ch))
#define GSI_EV_CH_E_DOORBELL_0_OFFSET(ev) \
- GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET((ev), GSI_EE_AP)
-#define GSI_EE_N_EV_CH_E_DOORBELL_0_OFFSET(ev, ee) \
- (0x0001e100 + 0x4000 * (ee) + 0x08 * (ev))
+ (0x0001e100 + 0x4000 * GSI_EE_AP + 0x08 * (ev))
#define GSI_GSI_STATUS_OFFSET \
- GSI_EE_N_GSI_STATUS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GSI_STATUS_OFFSET(ee) \
- (0x0001f000 + 0x4000 * (ee))
+ (0x0001f000 + 0x4000 * GSI_EE_AP)
#define ENABLED_FMASK GENMASK(0, 0)
#define GSI_CH_CMD_OFFSET \
- GSI_EE_N_CH_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CH_CMD_OFFSET(ee) \
- (0x0001f008 + 0x4000 * (ee))
+ (0x0001f008 + 0x4000 * GSI_EE_AP)
#define CH_CHID_FMASK GENMASK(7, 0)
#define CH_OPCODE_FMASK GENMASK(31, 24)
@@ -293,9 +237,7 @@ enum gsi_ch_cmd_opcode {
};
#define GSI_EV_CH_CMD_OFFSET \
- GSI_EE_N_EV_CH_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_EV_CH_CMD_OFFSET(ee) \
- (0x0001f010 + 0x4000 * (ee))
+ (0x0001f010 + 0x4000 * GSI_EE_AP)
#define EV_CHID_FMASK GENMASK(7, 0)
#define EV_OPCODE_FMASK GENMASK(31, 24)
@@ -307,24 +249,24 @@ enum gsi_evt_cmd_opcode {
};
#define GSI_GENERIC_CMD_OFFSET \
- GSI_EE_N_GENERIC_CMD_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GENERIC_CMD_OFFSET(ee) \
- (0x0001f018 + 0x4000 * (ee))
+ (0x0001f018 + 0x4000 * GSI_EE_AP)
#define GENERIC_OPCODE_FMASK GENMASK(4, 0)
#define GENERIC_CHID_FMASK GENMASK(9, 5)
#define GENERIC_EE_FMASK GENMASK(13, 10)
+#define GENERIC_PARAMS_FMASK GENMASK(31, 24) /* IPA v4.11+ */
/** enum gsi_generic_cmd_opcode - GENERIC_OPCODE field values in GENERIC_CMD */
enum gsi_generic_cmd_opcode {
GSI_GENERIC_HALT_CHANNEL = 0x1,
GSI_GENERIC_ALLOCATE_CHANNEL = 0x2,
+ GSI_GENERIC_ENABLE_FLOW_CONTROL = 0x3, /* IPA v4.2+ */
+ GSI_GENERIC_DISABLE_FLOW_CONTROL = 0x4, /* IPA v4.2+ */
+ GSI_GENERIC_QUERY_FLOW_CONTROL = 0x5, /* IPA v4.11+ */
};
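For orientation, issuing one of these generic commands means encoding the fields above into the GENERIC_CMD register. A minimal sketch, assuming u32_encode_bits() from <linux/bitfield.h>, GSI_EE_MODEM from gsi.h, and a channel_id in hand; the driver's real helper (not in this hunk) also arranges for a completion interrupt:

	u32 val;

	/* Sketch only: enable flow control on one modem channel */
	val = u32_encode_bits(GSI_GENERIC_ENABLE_FLOW_CONTROL,
			      GENERIC_OPCODE_FMASK);
	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
	iowrite32(val, gsi->virt + GSI_GENERIC_CMD_OFFSET);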
/* The next register is present for IPA v3.5.1 and above */
#define GSI_GSI_HW_PARAM_2_OFFSET \
- GSI_EE_N_GSI_HW_PARAM_2_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_GSI_HW_PARAM_2_OFFSET(ee) \
- (0x0001f040 + 0x4000 * (ee))
+ (0x0001f040 + 0x4000 * GSI_EE_AP)
#define IRAM_SIZE_FMASK GENMASK(2, 0)
#define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
#define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
@@ -353,13 +295,9 @@ enum gsi_iram_size {
/* IRQ condition for each type is cleared by writing type-specific register */
#define GSI_CNTXT_TYPE_IRQ_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_OFFSET(ee) \
- (0x0001f080 + 0x4000 * (ee))
+ (0x0001f080 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_TYPE_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
- (0x0001f088 + 0x4000 * (ee))
+ (0x0001f088 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
enum gsi_irq_type_id {
@@ -373,62 +311,38 @@ enum gsi_irq_type_id {
};
#define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(ee) \
- (0x0001f090 + 0x4000 * (ee))
+ (0x0001f090 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_OFFSET(ee) \
- (0x0001f094 + 0x4000 * (ee))
+ (0x0001f094 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_MSK_OFFSET(ee) \
- (0x0001f098 + 0x4000 * (ee))
+ (0x0001f098 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET(ee) \
- (0x0001f09c + 0x4000 * (ee))
+ (0x0001f09c + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_CH_IRQ_CLR_OFFSET(ee) \
- (0x0001f0a0 + 0x4000 * (ee))
+ (0x0001f0a0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET(ee) \
- (0x0001f0a4 + 0x4000 * (ee))
+ (0x0001f0a4 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_OFFSET(ee) \
- (0x0001f0b0 + 0x4000 * (ee))
+ (0x0001f0b0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET(ee) \
- (0x0001f0b8 + 0x4000 * (ee))
+ (0x0001f0b8 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET(ee) \
- (0x0001f0c0 + 0x4000 * (ee))
+ (0x0001f0c0 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_STTS_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_STTS_OFFSET(ee) \
- (0x0001f100 + 0x4000 * (ee))
+ (0x0001f100 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_EN_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_EN_OFFSET(ee) \
- (0x0001f108 + 0x4000 * (ee))
+ (0x0001f108 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GLOB_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GLOB_IRQ_CLR_OFFSET(ee) \
- (0x0001f110 + 0x4000 * (ee))
+ (0x0001f110 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the GLOB_IRQ_* registers */
enum gsi_global_irq_id {
ERROR_INT = 0x0,
@@ -438,17 +352,11 @@ enum gsi_global_irq_id {
};
#define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(ee) \
- (0x0001f118 + 0x4000 * (ee))
+ (0x0001f118 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_EN_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_EN_OFFSET(ee) \
- (0x0001f120 + 0x4000 * (ee))
+ (0x0001f120 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_GSI_IRQ_CLR_OFFSET \
- GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_GSI_IRQ_CLR_OFFSET(ee) \
- (0x0001f128 + 0x4000 * (ee))
+ (0x0001f128 + 0x4000 * GSI_EE_AP)
/* Values here are bit positions in the (general) GSI_IRQ_* registers */
enum gsi_general_id {
BREAK_POINT = 0x0,
@@ -458,15 +366,11 @@ enum gsi_general_id {
};
#define GSI_CNTXT_INTSET_OFFSET \
- GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_INTSET_OFFSET(ee) \
- (0x0001f180 + 0x4000 * (ee))
+ (0x0001f180 + 0x4000 * GSI_EE_AP)
#define INTYPE_FMASK GENMASK(0, 0)
#define GSI_ERROR_LOG_OFFSET \
- GSI_EE_N_ERROR_LOG_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_ERROR_LOG_OFFSET(ee) \
- (0x0001f200 + 0x4000 * (ee))
+ (0x0001f200 + 0x4000 * GSI_EE_AP)
/* Fields below are present for IPA v3.5.1 and above */
#define ERR_ARG3_FMASK GENMASK(3, 0)
@@ -497,21 +401,17 @@ enum gsi_err_type {
};
#define GSI_ERROR_LOG_CLR_OFFSET \
- GSI_EE_N_ERROR_LOG_CLR_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_ERROR_LOG_CLR_OFFSET(ee) \
- (0x0001f210 + 0x4000 * (ee))
+ (0x0001f210 + 0x4000 * GSI_EE_AP)
#define GSI_CNTXT_SCRATCH_0_OFFSET \
- GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(GSI_EE_AP)
-#define GSI_EE_N_CNTXT_SCRATCH_0_OFFSET(ee) \
- (0x0001f400 + 0x4000 * (ee))
+ (0x0001f400 + 0x4000 * GSI_EE_AP)
#define INTER_EE_RESULT_FMASK GENMASK(2, 0)
#define GENERIC_EE_RESULT_FMASK GENMASK(7, 5)
/** enum gsi_generic_ee_result - GENERIC_EE_RESULT field values in SCRATCH_0 */
enum gsi_generic_ee_result {
GENERIC_EE_SUCCESS = 0x1,
- GENERIC_EE_CHANNEL_NOT_RUNNING = 0x2,
+ GENERIC_EE_INCORRECT_CHANNEL_STATE = 0x2,
GENERIC_EE_INCORRECT_DIRECTION = 0x3,
GENERIC_EE_INCORRECT_CHANNEL_TYPE = 0x4,
GENERIC_EE_INCORRECT_CHANNEL = 0x5,
diff --git a/drivers/net/ipa/gsi_trans.c b/drivers/net/ipa/gsi_trans.c
index 1544564bc283..26b7f683a3e1 100644
--- a/drivers/net/ipa/gsi_trans.c
+++ b/drivers/net/ipa/gsi_trans.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -22,37 +22,36 @@
* DOC: GSI Transactions
*
* A GSI transaction abstracts the behavior of a GSI channel by representing
- * everything about a related group of IPA commands in a single structure.
- * (A "command" in this sense is either a data transfer or an IPA immediate
+ * everything about a related group of IPA operations in a single structure.
+ * (A "operation" in this sense is either a data transfer or an IPA immediate
* command.) Most details of interaction with the GSI hardware are managed
- * by the GSI transaction core, allowing users to simply describe commands
+ * by the GSI transaction core, allowing users to simply describe operations
* to be performed. When a transaction has completed, a callback function
* (dependent on the type of endpoint associated with the channel) allows
* cleanup of resources associated with the transaction.
*
- * To perform a command (or set of them), a user of the GSI transaction
+ * To perform an operation (or set of them), a user of the GSI transaction
* interface allocates a transaction, indicating the number of TREs required
- * (one per command). If sufficient TREs are available, they are reserved
+ * (one per operation). If sufficient TREs are available, they are reserved
* for use in the transaction and the allocation succeeds. This way
- * exhaustion of the available TREs in a channel ring is detected
- * as early as possible. All resources required to complete a transaction
- * are allocated at transaction allocation time.
+ * exhaustion of the available TREs in a channel ring is detected as early
+ * as possible. Any other resources that might be needed to complete a
+ * transaction are also allocated when the transaction is allocated.
*
- * Commands performed as part of a transaction are represented in an array
- * of Linux scatterlist structures. This array is allocated with the
- * transaction, and its entries are initialized using standard scatterlist
- * functions (such as sg_set_buf() or skb_to_sgvec()).
+ * Operations performed as part of a transaction are represented in an array
+ * of Linux scatterlist structures, allocated with the transaction. These
+ * scatterlist structures are initialized by "adding" operations to the
+ * transaction. If a buffer in an operation must be mapped for DMA, this is
+ * done at the time it is added to the transaction. It is possible for a
+ * mapping error to occur when an operation is added. In this case the
+ * transaction should simply be freed; this correctly releases resources
+ * associated with the transaction.
*
- * Once a transaction's scatterlist structures have been initialized, the
- * transaction is committed. The caller is responsible for mapping buffers
- * for DMA if necessary, and this should be done *before* allocating
- * the transaction. Between a successful allocation and commit of a
- * transaction no errors should occur.
- *
- * Committing transfers ownership of the entire transaction to the GSI
- * transaction core. The GSI transaction code formats the content of
- * the scatterlist array into the channel ring buffer and informs the
- * hardware that new TREs are available to process.
+ * Once all operations have been successfully added to a transaction, the
+ * transaction is committed. Committing transfers ownership of the entire
+ * transaction to the GSI transaction core. The GSI transaction code
+ * formats the content of the scatterlist array into the channel ring
+ * buffer and informs the hardware that new TREs are available to process.
*
* The last TRE in each transaction is marked to interrupt the AP when the
* GSI hardware has completed it. Because transfers described by TREs are
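A minimal sketch of the reworked flow from a TX user's point of view; the caller and its error handling are hypothetical, but the calls are the ones declared in gsi_trans.h:

static int example_skb_tx(struct gsi *gsi, u32 channel_id,
			  struct sk_buff *skb)
{
	struct gsi_trans *trans;
	int ret;

	/* Reserving one TRE fails early if the channel ring is full */
	trans = gsi_channel_trans_alloc(gsi, channel_id, 1, DMA_TO_DEVICE);
	if (!trans)
		return -EBUSY;

	/* Adding the skb maps it for DMA; on error, freeing the
	 * transaction releases everything allocated along with it.
	 */
	ret = gsi_trans_skb_add(trans, skb);
	if (ret) {
		gsi_trans_free(trans);
		return ret;
	}

	/* Ownership passes to the GSI transaction core */
	gsi_trans_commit(trans, true);

	return 0;
}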
@@ -125,11 +124,10 @@ void gsi_trans_pool_exit(struct gsi_trans_pool *pool)
memset(pool, 0, sizeof(*pool));
}
-/* Allocate the requested number of (zeroed) entries from the pool */
-/* Home-grown DMA pool. This way we can preallocate and use the tre_count
- * to guarantee allocations will succeed. Even though we specify max_alloc
- * (and it can be more than one), we only allow allocation of a single
- * element from a DMA pool.
+/* Home-grown DMA pool. This way we can preallocate the pool, and guarantee
+ * allocations will succeed. The immediate commands in a transaction can
+ * require up to max_alloc elements from the pool. But we only allow
+ * allocation of a single element from a DMA pool at a time.
*/
int gsi_trans_pool_init_dma(struct device *dev, struct gsi_trans_pool *pool,
size_t size, u32 count, u32 max_alloc)
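A worked sizing example, assuming the pool reserves count + max_alloc - 1 elements so a run of up to max_alloc contiguous elements never wraps past the end (the numbers are hypothetical, and the exact page rounding the allocator applies is an implementation detail):

/* A command channel with tre_max == 256 and trans_tre_max == 8:
 *
 *	elements reserved = 256 + 8 - 1 = 263
 *
 * Any single allocation of up to 8 contiguous payload elements can
 * then be satisfied without straddling the end of the pool.
 */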
@@ -214,26 +212,14 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr)
return pool->base + offset;
}
-/* Return the pool element that immediately follows the one given.
- * This only works done if elements are allocated one at a time.
- */
-void *gsi_trans_pool_next(struct gsi_trans_pool *pool, void *element)
+/* Map a TRE ring entry index to the transaction it is associated with */
+static void gsi_trans_map(struct gsi_trans *trans, u32 index)
{
- void *end = pool->base + pool->count * pool->size;
-
- WARN_ON(element < pool->base);
- WARN_ON(element >= end);
- WARN_ON(pool->max_alloc != 1);
-
- element += pool->size;
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
- return element < end ? element : pool->base;
-}
+ /* The completion event will indicate the last TRE used */
+ index += trans->used_count - 1;
-/* Map a given ring entry index to the transaction associated with it */
-static void gsi_channel_trans_map(struct gsi_channel *channel, u32 index,
- struct gsi_trans *trans)
-{
/* Note: index *must* be used modulo the ring count here */
channel->trans_info.map[index % channel->tre_ring.count] = trans;
}
@@ -249,52 +235,63 @@ gsi_channel_trans_mapped(struct gsi_channel *channel, u32 index)
/* Return the oldest completed transaction for a channel (or null) */
struct gsi_trans *gsi_channel_trans_complete(struct gsi_channel *channel)
{
- return list_first_entry_or_null(&channel->trans_info.complete,
- struct gsi_trans, links);
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_id = trans_info->completed_id;
+
+ if (trans_id == trans_info->pending_id) {
+ gsi_channel_update(channel);
+ if (trans_id == trans_info->pending_id)
+ return NULL;
+ }
+
+ return &trans_info->trans[trans_id %= channel->tre_count];
}
-/* Move a transaction from the allocated list to the pending list */
-static void gsi_trans_move_pending(struct gsi_trans *trans)
+/* Move a transaction from allocated to committed state */
+static void gsi_trans_move_committed(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- spin_lock_bh(&trans_info->spinlock);
+ /* This allocated transaction is now committed */
+ trans_info->allocated_id++;
+}
- list_move_tail(&trans->links, &trans_info->pending);
+/* Move committed transactions to pending state */
+static void gsi_trans_move_pending(struct gsi_trans *trans)
+{
+ struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
+ struct gsi_trans_info *trans_info = &channel->trans_info;
+ u16 trans_index = trans - &trans_info->trans[0];
+ u16 delta;
- spin_unlock_bh(&trans_info->spinlock);
+ /* These committed transactions are now pending */
+ delta = trans_index - trans_info->committed_id + 1;
+ trans_info->committed_id += delta % channel->tre_count;
}
-/* Move a transaction and all of its predecessors from the pending list
- * to the completed list.
- */
+/* Move pending transactions to completed state */
void gsi_trans_move_complete(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct list_head list;
-
- spin_lock_bh(&trans_info->spinlock);
-
- /* Move this transaction and all predecessors to completed list */
- list_cut_position(&list, &trans_info->pending, &trans->links);
- list_splice_tail(&list, &trans_info->complete);
+ u16 trans_index = trans - trans_info->trans;
+ u16 delta;
- spin_unlock_bh(&trans_info->spinlock);
+ /* These pending transactions are now completed */
+ delta = trans_index - trans_info->pending_id + 1;
+ delta %= channel->tre_count;
+ trans_info->pending_id += delta;
}
-/* Move a transaction from the completed list to the polled list */
+/* Move a transaction from completed to polled state */
void gsi_trans_move_polled(struct gsi_trans *trans)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
struct gsi_trans_info *trans_info = &channel->trans_info;
- spin_lock_bh(&trans_info->spinlock);
-
- list_move_tail(&trans->links, &trans_info->polled);
-
- spin_unlock_bh(&trans_info->spinlock);
+ /* This completed transaction is now polled */
+ trans_info->completed_id++;
}
/* Reserve some number of TREs on a channel. Returns true if successful */
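With the lists and spinlock gone, a transaction's state is implied entirely by six free-running 16-bit IDs, each adjacent pair bounding a half-open range of transactions in one state: free_id >= allocated_id >= committed_id >= pending_id >= completed_id >= polled_id (all comparisons modulo 2^16). A sketch of the counting this enables; these helpers are hypothetical, not part of the patch:

/* Transactions with IDs in [pending_id, committed_id) are pending */
static u16 example_pending_count(const struct gsi_trans_info *trans_info)
{
	/* Unsigned subtraction stays correct across u16 wraparound */
	return trans_info->committed_id - trans_info->pending_id;
}

/* Entries in no active state are free to be allocated */
static u16 example_free_count(const struct gsi_channel *channel)
{
	const struct gsi_trans_info *trans_info = &channel->trans_info;

	return channel->tre_count -
			(u16)(trans_info->free_id - trans_info->polled_id);
}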
@@ -320,6 +317,17 @@ gsi_trans_tre_release(struct gsi_trans_info *trans_info, u32 tre_count)
atomic_add(tre_count, &trans_info->tre_avail);
}
+/* Return true if no transactions are allocated, false otherwise */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id)
+{
+ u32 tre_max = gsi_channel_tre_max(gsi, channel_id);
+ struct gsi_trans_info *trans_info;
+
+ trans_info = &gsi->channel[channel_id].trans_info;
+
+ return atomic_read(&trans_info->tre_avail) == tre_max;
+}
+
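Because tre_avail only returns to tre_max once every reserved TRE has been released, this gives callers a way to wait for a channel to drain. A hypothetical quiesce loop (not part of this patch):

	while (!gsi_channel_trans_idle(&ipa->gsi, endpoint->channel_id))
		usleep_range(1000, 2000);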
/* Allocate a GSI transaction on a channel */
struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
u32 tre_count,
@@ -328,85 +336,78 @@ struct gsi_trans *gsi_channel_trans_alloc(struct gsi *gsi, u32 channel_id,
struct gsi_channel *channel = &gsi->channel[channel_id];
struct gsi_trans_info *trans_info;
struct gsi_trans *trans;
+ u16 trans_index;
- if (WARN_ON(tre_count > gsi_channel_trans_tre_max(gsi, channel_id)))
+ if (WARN_ON(tre_count > channel->trans_tre_max))
return NULL;
trans_info = &channel->trans_info;
- /* We reserve the TREs now, but consume them at commit time.
- * If there aren't enough available, we're done.
- */
+ /* If we can't reserve the TREs for the transaction, we're done */
if (!gsi_trans_tre_reserve(trans_info, tre_count))
return NULL;
- /* Allocate and initialize non-zero fields in the the transaction */
- trans = gsi_trans_pool_alloc(&trans_info->pool, 1);
+ trans_index = trans_info->free_id % channel->tre_count;
+ trans = &trans_info->trans[trans_index];
+ memset(trans, 0, sizeof(*trans));
+
+ /* Initialize non-zero fields in the transaction */
trans->gsi = gsi;
trans->channel_id = channel_id;
- trans->tre_count = tre_count;
+ trans->rsvd_count = tre_count;
init_completion(&trans->completion);
- /* Allocate the scatterlist and (if requested) info entries. */
+ /* Allocate the scatterlist */
trans->sgl = gsi_trans_pool_alloc(&trans_info->sg_pool, tre_count);
sg_init_marker(trans->sgl, tre_count);
trans->direction = direction;
-
- spin_lock_bh(&trans_info->spinlock);
-
- list_add_tail(&trans->links, &trans_info->alloc);
-
- spin_unlock_bh(&trans_info->spinlock);
-
refcount_set(&trans->refcount, 1);
+ /* This free transaction is now allocated */
+ trans_info->free_id++;
+
return trans;
}
/* Free a previously-allocated transaction */
void gsi_trans_free(struct gsi_trans *trans)
{
- refcount_t *refcount = &trans->refcount;
struct gsi_trans_info *trans_info;
- bool last;
- /* We must hold the lock to release the last reference */
- if (refcount_dec_not_one(refcount))
+ if (!refcount_dec_and_test(&trans->refcount))
return;
+ /* Unused transactions are allocated but never committed, pending,
+ * completed, or polled.
+ */
trans_info = &trans->gsi->channel[trans->channel_id].trans_info;
+ if (!trans->used_count) {
+ trans_info->allocated_id++;
+ trans_info->committed_id++;
+ trans_info->pending_id++;
+ trans_info->completed_id++;
+ } else {
+ ipa_gsi_trans_release(trans);
+ }
- spin_lock_bh(&trans_info->spinlock);
-
- /* Reference might have been added before we got the lock */
- last = refcount_dec_and_test(refcount);
- if (last)
- list_del(&trans->links);
-
- spin_unlock_bh(&trans_info->spinlock);
-
- if (!last)
- return;
-
- ipa_gsi_trans_release(trans);
+ /* This transaction is now free */
+ trans_info->polled_id++;
/* Releasing the reserved TREs implicitly frees the sgl[] array,
 * plus the transaction itself.
*/
- gsi_trans_tre_release(trans_info, trans->tre_count);
+ gsi_trans_tre_release(trans_info, trans->rsvd_count);
}
/* Add an immediate command to a transaction */
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
- dma_addr_t addr, enum dma_data_direction direction,
- enum ipa_cmd_opcode opcode)
+ dma_addr_t addr, enum ipa_cmd_opcode opcode)
{
- struct ipa_cmd_info *info;
- u32 which = trans->used++;
+ u32 which = trans->used_count++;
struct scatterlist *sg;
- WARN_ON(which >= trans->tre_count);
+ WARN_ON(which >= trans->rsvd_count);
/* Commands are quite different from data transfer requests.
* Their payloads come from a pool whose memory is allocated
@@ -427,9 +428,7 @@ void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
sg_dma_address(sg) = addr;
sg_dma_len(sg) = size;
- info = &trans->info[which];
- info->opcode = opcode;
- info->direction = direction;
+ trans->cmd_opcode[which] = opcode;
}
/* Add a page transfer to a transaction. It will fill the only TRE. */
@@ -439,9 +438,9 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
struct scatterlist *sg = &trans->sgl[0];
int ret;
- if (WARN_ON(trans->tre_count != 1))
+ if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
- if (WARN_ON(trans->used))
+ if (WARN_ON(trans->used_count))
return -EINVAL;
sg_set_page(sg, page, size, offset);
@@ -449,7 +448,7 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
if (!ret)
return -ENOMEM;
- trans->used++; /* Transaction now owns the (DMA mapped) page */
+ trans->used_count++; /* Transaction now owns the (DMA mapped) page */
return 0;
}
@@ -458,25 +457,26 @@ int gsi_trans_page_add(struct gsi_trans *trans, struct page *page, u32 size,
int gsi_trans_skb_add(struct gsi_trans *trans, struct sk_buff *skb)
{
struct scatterlist *sg = &trans->sgl[0];
- u32 used;
+ u32 used_count;
int ret;
- if (WARN_ON(trans->tre_count != 1))
+ if (WARN_ON(trans->rsvd_count != 1))
return -EINVAL;
- if (WARN_ON(trans->used))
+ if (WARN_ON(trans->used_count))
return -EINVAL;
/* skb->len will not be 0 (checked early) */
ret = skb_to_sgvec(skb, sg, 0, skb->len);
if (ret < 0)
return ret;
- used = ret;
+ used_count = ret;
- ret = dma_map_sg(trans->gsi->dev, sg, used, trans->direction);
+ ret = dma_map_sg(trans->gsi->dev, sg, used_count, trans->direction);
if (!ret)
return -ENOMEM;
- trans->used += used; /* Transaction now owns the (DMA mapped) skb */
+ /* Transaction now owns the (DMA mapped) skb */
+ trans->used_count += used_count;
return 0;
}
@@ -535,68 +535,64 @@ static void gsi_trans_tre_fill(struct gsi_tre *dest_tre, dma_addr_t addr,
*
* Formats channel ring TRE entries based on the content of the scatterlist.
* Maps a transaction pointer to the last ring entry used for the transaction,
- * so it can be recovered when it completes. Moves the transaction to the
- * pending list. Finally, updates the channel ring pointer and optionally
+ * so it can be recovered when it completes. Moves the transaction to
+ * pending state. Finally, updates the channel ring pointer and optionally
* rings the doorbell.
*/
static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
struct gsi_channel *channel = &trans->gsi->channel[trans->channel_id];
- struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_ring *tre_ring = &channel->tre_ring;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
bool bei = channel->toward_ipa;
- struct ipa_cmd_info *info;
struct gsi_tre *dest_tre;
struct scatterlist *sg;
u32 byte_count = 0;
+ u8 *cmd_opcode;
u32 avail;
u32 i;
- WARN_ON(!trans->used);
+ WARN_ON(!trans->used_count);
/* Consume the entries. If we cross the end of the ring while
* filling them we'll switch to the beginning to finish.
* If there is no command opcode array we're doing a simple data
* transfer request, whose opcode is IPA_CMD_NONE.
*/
- info = trans->info ? &trans->info[0] : NULL;
- avail = ring->count - ring->index % ring->count;
- dest_tre = gsi_ring_virt(ring, ring->index);
- for_each_sg(trans->sgl, sg, trans->used, i) {
- bool last_tre = i == trans->used - 1;
+ cmd_opcode = channel->command ? &trans->cmd_opcode[0] : NULL;
+ avail = tre_ring->count - tre_ring->index % tre_ring->count;
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
+ for_each_sg(trans->sgl, sg, trans->used_count, i) {
+ bool last_tre = i == trans->used_count - 1;
dma_addr_t addr = sg_dma_address(sg);
u32 len = sg_dma_len(sg);
byte_count += len;
if (!avail--)
- dest_tre = gsi_ring_virt(ring, 0);
- if (info)
- opcode = info++->opcode;
+ dest_tre = gsi_ring_virt(tre_ring, 0);
+ if (cmd_opcode)
+ opcode = *cmd_opcode++;
gsi_trans_tre_fill(dest_tre, addr, len, last_tre, bei, opcode);
dest_tre++;
}
- ring->index += trans->used;
-
- if (channel->toward_ipa) {
- /* We record TX bytes when they are sent */
- trans->len = byte_count;
- trans->trans_count = channel->trans_count;
- trans->byte_count = channel->byte_count;
- channel->trans_count++;
- channel->byte_count += byte_count;
- }
+ /* Associate the TRE with the transaction */
+ gsi_trans_map(trans, tre_ring->index);
+
+ tre_ring->index += trans->used_count;
- /* Associate the last TRE with the transaction */
- gsi_channel_trans_map(channel, ring->index - 1, trans);
+ trans->len = byte_count;
+ if (channel->toward_ipa)
+ gsi_trans_tx_committed(trans);
- gsi_trans_move_pending(trans);
+ gsi_trans_move_committed(trans);
/* Ring doorbell if requested, or if all TREs are allocated */
if (ring_db || !atomic_read(&channel->trans_info.tre_avail)) {
/* Report what we're handing off to hardware for TX channels */
if (channel->toward_ipa)
- gsi_channel_tx_queued(channel);
+ gsi_trans_tx_queued(trans);
+ gsi_trans_move_pending(trans);
gsi_channel_doorbell(channel);
}
}
@@ -604,7 +600,7 @@ static void __gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
/* Commit a GSI transaction */
void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
{
- if (trans->used)
+ if (trans->used_count)
__gsi_trans_commit(trans, ring_db);
else
gsi_trans_free(trans);
@@ -613,7 +609,7 @@ void gsi_trans_commit(struct gsi_trans *trans, bool ring_db)
/* Commit a GSI transaction and wait for it to complete */
void gsi_trans_commit_wait(struct gsi_trans *trans)
{
- if (!trans->used)
+ if (!trans->used_count)
goto out_trans_free;
refcount_inc(&trans->refcount);
@@ -626,34 +622,12 @@ out_trans_free:
gsi_trans_free(trans);
}
-/* Commit a GSI transaction and wait for it to complete, with timeout */
-int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
- unsigned long timeout)
-{
- unsigned long timeout_jiffies = msecs_to_jiffies(timeout);
- unsigned long remaining = 1; /* In case of empty transaction */
-
- if (!trans->used)
- goto out_trans_free;
-
- refcount_inc(&trans->refcount);
-
- __gsi_trans_commit(trans, true);
-
- remaining = wait_for_completion_timeout(&trans->completion,
- timeout_jiffies);
-out_trans_free:
- gsi_trans_free(trans);
-
- return remaining ? 0 : -ETIMEDOUT;
-}
-
/* Process the completion of a transaction; called while polling */
void gsi_trans_complete(struct gsi_trans *trans)
{
/* If the entire SGL was mapped when added, unmap it now */
if (trans->direction != DMA_NONE)
- dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used,
+ dma_unmap_sg(trans->gsi->dev, trans->sgl, trans->used_count,
trans->direction);
ipa_gsi_trans_complete(trans);
@@ -667,30 +641,34 @@ void gsi_trans_complete(struct gsi_trans *trans)
void gsi_channel_trans_cancel_pending(struct gsi_channel *channel)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
- struct gsi_trans *trans;
- bool cancelled;
+ u16 trans_id = trans_info->pending_id;
/* channel->gsi->mutex is held by caller */
- spin_lock_bh(&trans_info->spinlock);
- cancelled = !list_empty(&trans_info->pending);
- list_for_each_entry(trans, &trans_info->pending, links)
- trans->cancelled = true;
+ /* If there are no pending transactions, we're done */
+ if (trans_id == trans_info->committed_id)
+ return;
- list_splice_tail_init(&trans_info->pending, &trans_info->complete);
+ /* Mark all pending transactions cancelled */
+ do {
+ struct gsi_trans *trans;
- spin_unlock_bh(&trans_info->spinlock);
+ trans = &trans_info->trans[trans_id % channel->tre_count];
+ trans->cancelled = true;
+ } while (++trans_id != trans_info->committed_id);
+
+ /* All pending transactions are now completed */
+ trans_info->pending_id = trans_info->committed_id;
/* Schedule NAPI polling to complete the cancelled transactions */
- if (cancelled)
- napi_schedule(&channel->napi);
+ napi_schedule(&channel->napi);
}
/* Issue a command to read a single byte from a channel */
int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
- struct gsi_ring *ring = &channel->tre_ring;
+ struct gsi_ring *tre_ring = &channel->tre_ring;
struct gsi_trans_info *trans_info;
struct gsi_tre *dest_tre;
@@ -700,12 +678,12 @@ int gsi_trans_read_byte(struct gsi *gsi, u32 channel_id, dma_addr_t addr)
if (!gsi_trans_tre_reserve(trans_info, 1))
return -EBUSY;
- /* Now fill the the reserved TRE and tell the hardware */
+ /* Now fill the reserved TRE and tell the hardware */
- dest_tre = gsi_ring_virt(ring, ring->index);
+ dest_tre = gsi_ring_virt(tre_ring, tre_ring->index);
gsi_trans_tre_fill(dest_tre, addr, 1, true, false, IPA_CMD_NONE);
- ring->index++;
+ tre_ring->index++;
gsi_channel_doorbell(channel);
return 0;
@@ -723,6 +701,7 @@ void gsi_trans_read_byte_done(struct gsi *gsi, u32 channel_id)
int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
{
struct gsi_channel *channel = &gsi->channel[channel_id];
+ u32 tre_count = channel->tre_count;
struct gsi_trans_info *trans_info;
u32 tre_max;
int ret;
@@ -730,68 +709,66 @@ int gsi_channel_trans_init(struct gsi *gsi, u32 channel_id)
/* Ensure the size of a channel element is what's expected */
BUILD_BUG_ON(sizeof(struct gsi_tre) != GSI_RING_ELEMENT_SIZE);
- /* The map array is used to determine what transaction is associated
- * with a TRE that the hardware reports has completed. We need one
- * map entry per TRE.
- */
trans_info = &channel->trans_info;
- trans_info->map = kcalloc(channel->tre_count, sizeof(*trans_info->map),
- GFP_KERNEL);
- if (!trans_info->map)
- return -ENOMEM;
- /* We can't use more TREs than there are available in the ring.
- * This limits the number of transactions that can be oustanding.
- * Worst case is one TRE per transaction (but we actually limit
- * it to something a little less than that). We allocate resources
- * for transactions (including transaction structures) based on
- * this maximum number.
+ /* The tre_avail field is what ultimately limits the number of
+ * outstanding transactions and their resources. A transaction
+ * allocation succeeds only if the TREs available are sufficient
+ * for what the transaction might need.
*/
tre_max = gsi_channel_tre_max(channel->gsi, channel_id);
+ atomic_set(&trans_info->tre_avail, tre_max);
- /* Transactions are allocated one at a time. */
- ret = gsi_trans_pool_init(&trans_info->pool, sizeof(struct gsi_trans),
- tre_max, 1);
- if (ret)
- goto err_kfree;
+ /* We can't use more TREs than the number available in the ring.
+ * This limits the number of transactions that can be outstanding.
+ * Worst case is one TRE per transaction (but we actually limit
+ * it to something a little less than that). By allocating a
+ * power-of-two number of transactions we can use an index
+ * modulo that number to determine the next one that's free.
+ * Transactions are allocated one at a time.
+ */
+ trans_info->trans = kcalloc(tre_count, sizeof(*trans_info->trans),
+ GFP_KERNEL);
+ if (!trans_info->trans)
+ return -ENOMEM;
+ trans_info->free_id = 0; /* all modulo channel->tre_count */
+ trans_info->allocated_id = 0;
+ trans_info->committed_id = 0;
+ trans_info->pending_id = 0;
+ trans_info->completed_id = 0;
+ trans_info->polled_id = 0;
+
+ /* A completion event contains a pointer to the TRE that caused
+ * the event (which will be the last one used by the transaction).
+ * Each entry in this map records the transaction associated
+ * with a corresponding completed TRE.
+ */
+ trans_info->map = kcalloc(tre_count, sizeof(*trans_info->map),
+ GFP_KERNEL);
+ if (!trans_info->map) {
+ ret = -ENOMEM;
+ goto err_trans_free;
+ }
/* A transaction uses a scatterlist array to represent the data
* transfers implemented by the transaction. Each scatterlist
* element is used to fill a single TRE when the transaction is
* committed. So we need as many scatterlist elements as the
* maximum number of TREs that can be outstanding.
- *
- * All TREs in a transaction must fit within the channel's TLV FIFO.
- * A transaction on a channel can allocate as many TREs as that but
- * no more.
*/
ret = gsi_trans_pool_init(&trans_info->sg_pool,
sizeof(struct scatterlist),
- tre_max, channel->tlv_count);
+ tre_max, channel->trans_tre_max);
if (ret)
- goto err_trans_pool_exit;
-
- /* Finally, the tre_avail field is what ultimately limits the number
- * of outstanding transactions and their resources. A transaction
- * allocation succeeds only if the TREs available are sufficient for
- * what the transaction might need. Transaction resource pools are
- * sized based on the maximum number of outstanding TREs, so there
- * will always be resources available if there are TREs available.
- */
- atomic_set(&trans_info->tre_avail, tre_max);
+ goto err_map_free;
- spin_lock_init(&trans_info->spinlock);
- INIT_LIST_HEAD(&trans_info->alloc);
- INIT_LIST_HEAD(&trans_info->pending);
- INIT_LIST_HEAD(&trans_info->complete);
- INIT_LIST_HEAD(&trans_info->polled);
return 0;
-err_trans_pool_exit:
- gsi_trans_pool_exit(&trans_info->pool);
-err_kfree:
+err_map_free:
kfree(trans_info->map);
+err_trans_free:
+ kfree(trans_info->trans);
dev_err(gsi->dev, "error %d initializing channel %u transactions\n",
ret, channel_id);
@@ -805,6 +782,6 @@ void gsi_channel_trans_exit(struct gsi_channel *channel)
struct gsi_trans_info *trans_info = &channel->trans_info;
gsi_trans_pool_exit(&trans_info->sg_pool);
- gsi_trans_pool_exit(&trans_info->pool);
+ kfree(trans_info->trans);
kfree(trans_info->map);
}
diff --git a/drivers/net/ipa/gsi_trans.h b/drivers/net/ipa/gsi_trans.h
index 17fd1822d8a9..30c1c2dc77c6 100644
--- a/drivers/net/ipa/gsi_trans.h
+++ b/drivers/net/ipa/gsi_trans.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _GSI_TRANS_H_
#define _GSI_TRANS_H_
@@ -22,44 +22,47 @@ struct gsi;
struct gsi_trans;
struct gsi_trans_pool;
+/* Maximum number of TREs in an IPA immediate command transaction */
+#define IPA_COMMAND_TRANS_TRE_MAX 8
+
/**
* struct gsi_trans - a GSI transaction
*
* Most fields in this structure are for internal use by the transaction core code:
- * @links: Links for channel transaction lists by state
* @gsi: GSI pointer
* @channel_id: Channel number transaction is associated with
* @cancelled: If set by the core code, transaction was cancelled
- * @tre_count: Number of TREs reserved for this transaction
- * @used: Number of TREs *used* (could be less than tre_count)
- * @len: Total # of transfer bytes represented in sgl[] (set by core)
+ * @rsvd_count: Number of TREs reserved for this transaction
+ * @used_count: Number of TREs *used* (could be less than rsvd_count)
+ * @len: Number of bytes sent or received by the transaction
* @data: Preserved but not touched by the core transaction code
+ * @cmd_opcode: Array of command opcodes (command channel only)
* @sgl: An array of scatter/gather entries managed by core code
- * @info: Array of command information structures (command channel)
* @direction: DMA transfer direction (DMA_NONE for commands)
* @refcount: Reference count used for destruction
* @completion: Completed when the transaction completes
* @byte_count: TX channel byte count recorded when transaction committed
* @trans_count: Channel transaction count when committed (for BQL accounting)
*
- * The size used for some fields in this structure were chosen to ensure
- * the full structure size is no larger than 128 bytes.
+ * The @len field is set when the transaction is committed. For RX
+ * transactions it is updated later to reflect the actual number of bytes
+ * received.
*/
struct gsi_trans {
- struct list_head links; /* gsi_channel lists */
-
struct gsi *gsi;
u8 channel_id;
bool cancelled; /* true if transaction was cancelled */
- u8 tre_count; /* # TREs requested */
- u8 used; /* # entries used in sgl[] */
+ u8 rsvd_count; /* # TREs requested */
+ u8 used_count; /* # entries used in sgl[] */
u32 len; /* total # bytes across sgl[] */
- void *data;
+ union {
+ void *data;
+ u8 cmd_opcode[IPA_COMMAND_TRANS_TRE_MAX];
+ };
struct scatterlist *sgl;
- struct ipa_cmd_info *info; /* array of entries, or null */
enum dma_data_direction direction;
refcount_t refcount;
@@ -71,7 +74,7 @@ struct gsi_trans {
/**
* gsi_trans_pool_init() - Initialize a pool of structures for transactions
- * @pool: GSI transaction poll pointer
+ * @pool: GSI transaction pool pointer
* @size: Size of elements in the pool
* @count: Minimum number of elements in the pool
* @max_alloc: Maximum number of elements allocated at a time from pool
@@ -130,6 +133,16 @@ void *gsi_trans_pool_alloc_dma(struct gsi_trans_pool *pool, dma_addr_t *addr);
void gsi_trans_pool_exit_dma(struct device *dev, struct gsi_trans_pool *pool);
/**
+ * gsi_channel_trans_idle() - Return whether no transactions are allocated
+ * @gsi: GSI pointer
+ * @channel_id: Channel the transaction is associated with
+ *
+ * Return: True if no transactions are allocated, false otherwise
+ */
+bool gsi_channel_trans_idle(struct gsi *gsi, u32 channel_id);
+
+/**
* gsi_channel_trans_alloc() - Allocate a GSI transaction on a channel
* @gsi: GSI pointer
* @channel_id: Channel the transaction is associated with
@@ -155,12 +168,10 @@ void gsi_trans_free(struct gsi_trans *trans);
* @buf: Buffer pointer for command payload
* @size: Number of bytes in buffer
* @addr: DMA address for payload
- * @direction: Direction of DMA transfer (or DMA_NONE if none required)
* @opcode: IPA immediate command opcode
*/
void gsi_trans_cmd_add(struct gsi_trans *trans, void *buf, u32 size,
- dma_addr_t addr, enum dma_data_direction direction,
- enum ipa_cmd_opcode opcode);
+ dma_addr_t addr, enum ipa_cmd_opcode opcode);
/**
* gsi_trans_page_add() - Add a page transfer to a transaction
@@ -196,15 +207,6 @@ void gsi_trans_commit(struct gsi_trans *trans, bool ring_db);
void gsi_trans_commit_wait(struct gsi_trans *trans);
/**
- * gsi_trans_commit_wait_timeout() - Commit a GSI transaction and wait for
- * it to complete, with timeout
- * @trans: Transaction to commit
- * @timeout: Timeout period (in milliseconds)
- */
-int gsi_trans_commit_wait_timeout(struct gsi_trans *trans,
- unsigned long timeout);
-
-/**
* gsi_trans_read_byte() - Issue a single byte read TRE on a channel
* @gsi: GSI pointer
* @channel_id: Channel on which to read a byte
diff --git a/drivers/net/ipa/ipa.h b/drivers/net/ipa/ipa.h
index 9fc880eb7e3a..09ead433ec38 100644
--- a/drivers/net/ipa/ipa.h
+++ b/drivers/net/ipa/ipa.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_H_
#define _IPA_H_
@@ -44,6 +44,7 @@ struct ipa_interrupt;
* @uc_loaded: true after microcontroller has reported it's ready
* @reg_addr: DMA address used for IPA register access
* @reg_virt: Virtual address used for IPA register access
+ * @regs: IPA register definitions
* @mem_addr: DMA address of IPA-local memory space
* @mem_virt: Virtual address of IPA-local memory space
* @mem_offset: Offset from @mem_virt used for access to IPA memory
@@ -62,6 +63,7 @@ struct ipa_interrupt;
* @initialized: Bit mask indicating endpoints initialized
* @set_up: Bit mask indicating endpoints set up
* @enabled: Bit mask indicating endpoints enabled
+ * @modem_tx_count: Number of defined modem TX endpoints
* @endpoint: Array of endpoint information
* @channel_map: Mapping of GSI channel to IPA endpoint
* @name_map: Mapping of IPA endpoint name to IPA endpoint
@@ -89,6 +91,7 @@ struct ipa {
dma_addr_t reg_addr;
void __iomem *reg_virt;
+ const struct ipa_regs *regs;
dma_addr_t mem_addr;
void *mem_virt;
@@ -114,6 +117,7 @@ struct ipa {
u32 set_up;
u32 enabled;
+ u32 modem_tx_count;
struct ipa_endpoint endpoint[IPA_ENDPOINT_MAX];
struct ipa_endpoint *channel_map[GSI_CHANNEL_COUNT_MAX];
struct ipa_endpoint *name_map[IPA_ENDPOINT_COUNT];
diff --git a/drivers/net/ipa/ipa_cmd.c b/drivers/net/ipa/ipa_cmd.c
index d57472ea077f..26c3db9f52b1 100644
--- a/drivers/net/ipa/ipa_cmd.c
+++ b/drivers/net/ipa/ipa_cmd.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -26,14 +26,13 @@
* other than data transfer to another endpoint.
*
* Immediate commands are represented by GSI transactions just like other
- * transfer requests, represented by a single GSI TRE. Each immediate
- * command has a well-defined format, having a payload of a known length.
- * This allows the transfer element's length field to be used to hold an
- * immediate command's opcode. The payload for a command resides in DRAM
- * and is described by a single scatterlist entry in its transaction.
- * Commands do not require a transaction completion callback. To commit
- * an immediate command transaction, either gsi_trans_commit_wait() or
- * gsi_trans_commit_wait_timeout() is used.
+ * transfer requests, and use a single GSI TRE. Each immediate command
+ * has a well-defined format, having a payload of a known length. This
+ * allows the transfer element's length field to be used to hold an
+ * immediate command's opcode. The payload for a command resides in AP
+ * memory and is described by a single scatterlist entry in its transaction.
+ * Commands do not require a transaction completion callback, and are
+ * always issued using gsi_trans_commit_wait().
*/
/* Some commands can wait until indicated pipeline stages are clear */
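Making the DOC comment above concrete, a sketch of issuing a single register-write immediate command; the trailing mask and clear-full arguments of ipa_cmd_register_write_add() are assumptions based on its use elsewhere in the driver:

static void example_register_write(struct ipa *ipa, u32 offset, u32 value)
{
	struct gsi_trans *trans;

	/* One TRE per command; up to IPA_COMMAND_TRANS_TRE_MAX allowed */
	trans = ipa_cmd_trans_alloc(ipa, 1);
	if (!trans)
		return;

	/* Write the full value (assumed ~0 mask), no pipeline clear */
	ipa_cmd_register_write_add(trans, offset, value, ~0, false);

	/* Commands are always committed synchronously */
	gsi_trans_commit_wait(trans);
}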
@@ -306,6 +305,7 @@ static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
const char *name;
u32 offset;
@@ -313,7 +313,8 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* offset will fit in a register write IPA immediate command.
*/
if (ipa_table_hash_support(ipa)) {
- offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
name = "filter/route hash flush";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -326,7 +327,8 @@ static bool ipa_cmd_register_write_valid(struct ipa *ipa)
* worst case (highest endpoint number) offset of that endpoint
* fits in the register write command field(s) that must hold it.
*/
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1);
name = "maximal endpoint status";
if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
return false;
@@ -350,29 +352,17 @@ int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
- int ret;
/* This is as good a place as any to validate build constants */
ipa_cmd_validate_build();
- /* Even though command payloads are allocated one at a time,
- * a single transaction can require up to tlv_count of them,
- * so we treat them as if that many can be allocated at once.
+ /* Command payloads are allocated one at a time, but a single
+ * transaction can require up to the maximum supported by the
+ * channel; treat them as if they were allocated all at once.
*/
- ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
- sizeof(union ipa_cmd_payload),
- tre_max, channel->tlv_count);
- if (ret)
- return ret;
-
- /* Each TRE needs a command info structure */
- ret = gsi_trans_pool_init(&trans_info->info_pool,
- sizeof(struct ipa_cmd_info),
- tre_max, channel->tlv_count);
- if (ret)
- gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
-
- return ret;
+ return gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
+ sizeof(union ipa_cmd_payload),
+ tre_max, channel->trans_tre_max);
}
void ipa_cmd_pool_exit(struct gsi_channel *channel)
@@ -380,7 +370,6 @@ void ipa_cmd_pool_exit(struct gsi_channel *channel)
struct gsi_trans_info *trans_info = &channel->trans_info;
struct device *dev = channel->gsi->dev;
- gsi_trans_pool_exit(&trans_info->info_pool);
gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}
@@ -403,7 +392,6 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans,
dma_addr_t hash_addr)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_hw_ip_fltrt_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -434,7 +422,7 @@ void ipa_cmd_table_init_add(struct gsi_trans *trans,
payload->nhash_rules_addr = cpu_to_le64(addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Initialize header space in IPA-local memory */
@@ -443,7 +431,6 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_hw_hdr_init_local *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -465,7 +452,7 @@ void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
payload->flags = cpu_to_le32(flags);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
@@ -522,7 +509,7 @@ void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
payload->clear_options = cpu_to_le32(options);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- DMA_NONE, opcode);
+ opcode);
}
/* Skip IP packet processing on the next data transfer on a TX channel */
@@ -530,7 +517,6 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_ip_packet_init *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -542,7 +528,7 @@ static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Use a DMA command to read or write a block of IPA-resident memory */
@@ -553,7 +539,6 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
struct ipa_cmd_hw_dma_mem_mem *payload;
union ipa_cmd_payload *cmd_payload;
- enum dma_data_direction direction;
dma_addr_t payload_addr;
u16 flags;
@@ -584,17 +569,14 @@ void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
payload->flags = cpu_to_le16(flags);
payload->system_addr = cpu_to_le64(addr);
- direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
-
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
- enum dma_data_direction direction = DMA_TO_DEVICE;
struct ipa_cmd_ip_packet_tag_status *payload;
union ipa_cmd_payload *cmd_payload;
dma_addr_t payload_addr;
@@ -605,14 +587,13 @@ static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
- enum dma_data_direction direction = DMA_TO_DEVICE;
enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
union ipa_cmd_payload *payload;
dma_addr_t payload_addr;
@@ -621,7 +602,7 @@ static void ipa_cmd_transfer_add(struct gsi_trans *trans)
payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
- direction, opcode);
+ opcode);
}
/* Add immediate commands to a transaction to clear the hardware pipeline */
@@ -661,28 +642,16 @@ void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
wait_for_completion(&ipa->completion);
}
-static struct ipa_cmd_info *
-ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
-{
- struct gsi_channel *channel;
-
- channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];
-
- return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
-}
-
/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
struct ipa_endpoint *endpoint;
- struct gsi_trans *trans;
- endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
+ if (WARN_ON(tre_count > IPA_COMMAND_TRANS_TRE_MAX))
+ return NULL;
- trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
- tre_count, DMA_NONE);
- if (trans)
- trans->info = ipa_cmd_info_alloc(endpoint, tre_count);
+ endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
- return trans;
+ return gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
+ tre_count, DMA_NONE);
}
diff --git a/drivers/net/ipa/ipa_cmd.h b/drivers/net/ipa/ipa_cmd.h
index 05ed7e42e184..8e4243c1f0bb 100644
--- a/drivers/net/ipa/ipa_cmd.h
+++ b/drivers/net/ipa/ipa_cmd.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_CMD_H_
#define _IPA_CMD_H_
@@ -47,17 +47,6 @@ enum ipa_cmd_opcode {
};
/**
- * struct ipa_cmd_info - information needed for an IPA immediate command
- *
- * @opcode: The command opcode.
- * @direction: Direction of data transfer for DMA commands
- */
-struct ipa_cmd_info {
- enum ipa_cmd_opcode opcode;
- enum dma_data_direction direction;
-};
-
-/**
* ipa_cmd_table_valid() - Validate a memory region holding a table
* @ipa: - IPA pointer
* @mem: - IPA memory region descriptor
diff --git a/drivers/net/ipa/ipa_data.h b/drivers/net/ipa/ipa_data.h
index 6d329e9ce5d2..e5a6ce75c7dd 100644
--- a/drivers/net/ipa/ipa_data.h
+++ b/drivers/net/ipa/ipa_data.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_DATA_H_
#define _IPA_DATA_H_
@@ -31,7 +31,7 @@
* communication path between the IPA and a particular execution environment
* (EE), such as the AP or Modem. Each EE has a set of channels associated
* with it, and each channel has an ID unique for that EE. For the most part
- * the only GSI channels of concern to this driver belong to the AP
+ * the only GSI channels of concern to this driver belong to the AP.
*
* An endpoint is an IPA construct representing a single channel anywhere
* in the system. An IPA endpoint ID maps directly to an (EE, channel_id)
@@ -96,69 +96,9 @@ struct gsi_channel_data {
};
/**
- * struct ipa_endpoint_tx_data - configuration data for TX endpoints
- * @seq_type: primary packet processing sequencer type
- * @seq_rep_type: sequencer type for replication processing
- * @status_endpoint: endpoint to which status elements are sent
- *
- * The @status_endpoint is only valid if the endpoint's @status_enable
- * flag is set.
- */
-struct ipa_endpoint_tx_data {
- enum ipa_seq_type seq_type;
- enum ipa_seq_rep_type seq_rep_type;
- enum ipa_endpoint_name status_endpoint;
-};
-
-/**
- * struct ipa_endpoint_rx_data - configuration data for RX endpoints
- * @pad_align: power-of-2 boundary to which packet payload is aligned
- * @aggr_close_eof: whether aggregation closes on end-of-frame
- *
- * With each packet it transfers, the IPA hardware can perform certain
- * transformations of its packet data. One of these is adding pad bytes
- * to the end of the packet data so the result ends on a power-of-2 boundary.
- *
- * It is also able to aggregate multiple packets into a single receive buffer.
- * Aggregation is "open" while a buffer is being filled, and "closes" when
- * certain criteria are met. One of those criteria is the sender indicating
- * a "frame" consisting of several transfers has ended.
- */
-struct ipa_endpoint_rx_data {
- u32 pad_align;
- bool aggr_close_eof;
-};
-
-/**
- * struct ipa_endpoint_config_data - IPA endpoint hardware configuration
- * @resource_group: resource group to assign endpoint to
- * @checksum: whether checksum offload is enabled
- * @qmap: whether endpoint uses QMAP protocol
- * @aggregation: whether endpoint supports aggregation
- * @status_enable: whether endpoint uses status elements
- * @dma_mode: whether endpoint operates in DMA mode
- * @dma_endpoint: peer endpoint, if operating in DMA mode
- * @tx: TX-specific endpoint information (see above)
- * @rx: RX-specific endpoint information (see above)
- */
-struct ipa_endpoint_config_data {
- u32 resource_group;
- bool checksum;
- bool qmap;
- bool aggregation;
- bool status_enable;
- bool dma_mode;
- enum ipa_endpoint_name dma_endpoint;
- union {
- struct ipa_endpoint_tx_data tx;
- struct ipa_endpoint_rx_data rx;
- };
-};
-
-/**
* struct ipa_endpoint_data - IPA endpoint configuration data
* @filter_support: whether endpoint supports filtering
- * @config: hardware configuration (see above)
+ * @config: hardware configuration
*
* Not all endpoints support the IPA filtering capability. A filter table
* defines the filters to apply for those endpoints that support it. The
@@ -166,12 +106,12 @@ struct ipa_endpoint_config_data {
* for non-AP endpoints. For this reason we define *all* endpoints used
* in the system, and indicate whether they support filtering.
*
- * The remaining endpoint configuration data applies only to AP endpoints.
+ * The remaining endpoint configuration data specifies default hardware
+ * configuration values that apply only to AP endpoints.
*/
struct ipa_endpoint_data {
bool filter_support;
- /* Everything else is specified only for AP endpoints */
- struct ipa_endpoint_config_data config;
+ struct ipa_endpoint_config config;
};
/**
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c
index 03a170993420..093e11ec7c2d 100644
--- a/drivers/net/ipa/ipa_endpoint.c
+++ b/drivers/net/ipa/ipa_endpoint.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -23,12 +23,8 @@
#include "ipa_gsi.h"
#include "ipa_power.h"
-#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
-
-#define IPA_REPLENISH_BATCH 16
-
-/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
-#define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */
+/* Hardware is told about receive buffers once a "batch" has been queued */
+#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
@@ -37,7 +33,6 @@
#define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
-#define IPA_AGGR_TIME_LIMIT 500 /* microseconds */
/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
@@ -75,6 +70,24 @@ struct ipa_status {
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
+/* Compute the aggregation size value to use for a given buffer size */
+static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
+{
+ /* A hard aggregation limit will not be crossed; aggregation closes
+ * if saving incoming data would cross the hard byte limit boundary.
+ *
+ * With a soft limit, aggregation closes *after* the size boundary
+ * has been crossed. In that case the limit must leave enough space
+ * after that limit to receive a full MTU of data plus overhead.
+ */
+ if (!aggr_hard_limit)
+ rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+
+ /* The byte limit is encoded as a number of kilobytes */
+
+ return rx_buffer_size / SZ_1K;
+}
+
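
As a worked example of the new helper (the values are illustrative: they assume IPA_MTU is 1500 and that IPA_RX_BUFFER_OVERHEAD comes to roughly 400 bytes on a 4 KiB-page system):

	u32 hard = ipa_aggr_size_kb(8192, true);   /* 8192 / 1024 = 8 KB */
	u32 soft = ipa_aggr_size_kb(8192, false);  /* (8192 - 1500 - ~400) / 1024 = 6 KB */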
static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *all_data,
const struct ipa_gsi_endpoint_data *data)
@@ -87,6 +100,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
if (!data->toward_ipa) {
+ const struct ipa_endpoint_rx *rx_config;
+ const struct ipa_reg *reg;
+ u32 buffer_size;
+ u32 aggr_size;
+ u32 limit;
+
if (data->endpoint.filter_support) {
dev_err(dev, "filtering not supported for "
"RX endpoint %u\n",
@@ -94,9 +113,77 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return false;
}
+ /* Nothing more to check for non-AP RX */
+ if (data->ee_id != GSI_EE_AP)
+ return true;
+
+ rx_config = &data->endpoint.config.rx;
+
+ /* The buffer size must hold an MTU plus overhead */
+ buffer_size = rx_config->buffer_size;
+ limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+ if (buffer_size < limit) {
+ dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
+ data->endpoint_id, buffer_size, limit);
+ return false;
+ }
+
+ if (!data->endpoint.config.aggregation) {
+ bool result = true;
+
+ /* No aggregation; check for bogus aggregation data */
+ if (rx_config->aggr_time_limit) {
+ dev_err(dev,
+ "time limit with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ if (rx_config->aggr_hard_limit) {
+ dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ if (rx_config->aggr_close_eof) {
+ dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
+ data->endpoint_id);
+ result = false;
+ }
+
+ return result; /* Nothing more to check */
+ }
+
+ /* For an endpoint supporting receive aggregation, the byte
+ * limit defines the point at which aggregation closes. This
+ * check ensures the receive buffer size doesn't result in a
+ * limit that exceeds what's representable in the aggregation
+ * byte limit field.
+ */
+ aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
+ rx_config->aggr_hard_limit);
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
+
+ limit = ipa_reg_field_max(reg, BYTE_LIMIT);
+ if (aggr_size > limit) {
+ dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
+ data->endpoint_id, aggr_size, limit);
+
+ return false;
+ }
+
return true; /* Nothing more to check for RX */
}
+ /* Starting with IPA v4.5 sequencer replication is obsolete */
+ if (ipa->version >= IPA_VERSION_4_5) {
+ if (data->endpoint.config.tx.seq_rep_type) {
+ dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n",
+ data->endpoint_id);
+ return false;
+ }
+ }
+
if (data->endpoint.config.status_enable) {
other_name = data->endpoint.config.tx.status_endpoint;
if (other_name >= count) {
@@ -156,21 +243,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
return true;
}
-static u32 aggr_byte_limit_max(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_5)
- return field_max(aggr_byte_limit_fmask(true));
-
- return field_max(aggr_byte_limit_fmask(false));
-}
-
static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
const struct ipa_gsi_endpoint_data *data)
{
const struct ipa_gsi_endpoint_data *dp = data;
struct device *dev = &ipa->pdev->dev;
enum ipa_endpoint_name name;
- u32 limit;
if (count > IPA_ENDPOINT_COUNT) {
dev_err(dev, "too many endpoints specified (%u > %u)\n",
@@ -178,26 +256,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
return false;
}
- /* The aggregation byte limit defines the point at which an
- * aggregation window will close. It is programmed into the
- * IPA hardware as a number of KB. We don't use "hard byte
- * limit" aggregation, which means that we need to supply
- * enough space in a receive buffer to hold a complete MTU
- * plus normal skb overhead *after* that aggregation byte
- * limit has been crossed.
- *
- * This check ensures we don't define a receive buffer size
- * that would exceed what we can represent in the field that
- * is used to program its size.
- */
- limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
- limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
- if (limit < IPA_RX_BUFFER_SIZE) {
- dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
- IPA_RX_BUFFER_SIZE, limit);
- return false;
- }
-
/* Make sure needed endpoints have defined data */
if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
dev_err(dev, "command TX endpoint not defined\n");
@@ -237,28 +295,32 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
}
/* suspend_delay represents suspend for RX, delay for TX endpoints.
- * Note that suspend is not supported starting with IPA v4.0.
+ * Note that suspend is not supported starting with IPA v4.0, and
+ * delay mode should not be used starting with IPA v4.2.
*/
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
- u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 field_id;
+ u32 offset;
bool state;
u32 mask;
u32 val;
- /* Suspend is not supported for IPA v4.0+. Delay doesn't work
- * correctly on IPA v4.2.
- */
if (endpoint->toward_ipa)
- WARN_ON(ipa->version == IPA_VERSION_4_2);
+ WARN_ON(ipa->version >= IPA_VERSION_4_2);
else
WARN_ON(ipa->version >= IPA_VERSION_4_0);
- mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
-
+ reg = ipa_reg(ipa, ENDP_INIT_CTRL);
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
val = ioread32(ipa->reg_virt + offset);
+
+ field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
+ mask = ipa_reg_bit(reg, field_id);
+
state = !!(val & mask);
/* Don't bother if it's already in the requested state */
@@ -270,28 +332,28 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
return state;
}
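
The unchanged middle of this function (elided by the hunk above) performs the read-modify-write; roughly, given the state/mask setup shown:

	if (suspend_delay != state) {
		val ^= mask;
		iowrite32(val, ipa->reg_virt + offset);
	}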
-/* We currently don't care what the previous state was for delay mode */
+/* We don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
+ /* Delay mode should not be used for IPA v4.2+ */
+ WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
WARN_ON(!endpoint->toward_ipa);
- /* Delay mode doesn't work properly for IPA v4.2 */
- if (endpoint->ipa->version != IPA_VERSION_4_2)
- (void)ipa_endpoint_init_ctrl(endpoint, enable);
+ (void)ipa_endpoint_init_ctrl(endpoint, enable);
}
static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
WARN_ON(!(mask & ipa->available));
- offset = ipa_reg_state_aggr_active_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
return !!(val & mask);
}
@@ -300,10 +362,12 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
u32 mask = BIT(endpoint->endpoint_id);
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
WARN_ON(!(mask & ipa->available));
- iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
+ reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
+ iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
@@ -319,7 +383,7 @@ static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
struct ipa *ipa = endpoint->ipa;
- if (!endpoint->data->aggregation)
+ if (!endpoint->config.aggregation)
return;
/* Nothing to do if the endpoint doesn't have aggregation open */
@@ -355,26 +419,29 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
return suspended;
}
-/* Enable or disable delay or suspend mode on all modem endpoints */
+/* Put all modem RX endpoints into suspend mode, and stop transmission
+ * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
+ * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
+ * control instead.
+ */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
u32 endpoint_id;
- /* DELAY mode doesn't work correctly on IPA v4.2 */
- if (ipa->version == IPA_VERSION_4_2)
- return;
-
for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
if (endpoint->ee_id != GSI_EE_MODEM)
continue;
- /* Set TX delay mode or RX suspend mode */
- if (endpoint->toward_ipa)
+ if (!endpoint->toward_ipa)
+ (void)ipa_endpoint_program_suspend(endpoint, enable);
+ else if (ipa->version < IPA_VERSION_4_2)
ipa_endpoint_program_delay(endpoint, enable);
else
- (void)ipa_endpoint_program_suspend(endpoint, enable);
+ gsi_modem_channel_flow_control(&ipa->gsi,
+ endpoint->channel_id,
+ enable);
}
}
@@ -385,12 +452,10 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
struct gsi_trans *trans;
u32 count;
- /* We need one command per modem TX endpoint. We can get an upper
- * bound on that by assuming all initialized endpoints are modem->IPA.
- * That won't happen, and we could be more precise, but this is fine
- * for now. End the transaction with commands to clear the pipeline.
+ /* We need one command per modem TX endpoint, plus the commands
+ * that clear the pipeline.
*/
- count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
+ count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
trans = ipa_cmd_trans_alloc(ipa, count);
if (!trans) {
dev_err(&ipa->pdev->dev,
@@ -401,6 +466,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
while (initialized) {
u32 endpoint_id = __ffs(initialized);
struct ipa_endpoint *endpoint;
+ const struct ipa_reg *reg;
u32 offset;
initialized ^= BIT(endpoint_id);
@@ -410,7 +476,8 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
continue;
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Value written is 0, and all bits are updated. That
* means status is disabled on the endpoint, and as a
@@ -421,7 +488,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
ipa_cmd_pipeline_clear_add(trans);
- /* XXX This should have a 1 second timeout */
gsi_trans_commit_wait(trans);
ipa_cmd_pipeline_clear_wait(ipa);
@@ -431,22 +497,23 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
enum ipa_cs_offload_en enabled;
+ const struct ipa_reg *reg;
u32 val = 0;
+ reg = ipa_reg(ipa, ENDP_INIT_CFG);
/* FRAG_OFFLOAD_EN is 0 */
- if (endpoint->data->checksum) {
- enum ipa_version version = endpoint->ipa->version;
+ if (endpoint->config.checksum) {
+ enum ipa_version version = ipa->version;
if (endpoint->toward_ipa) {
- u32 checksum_offset;
+ u32 off;
/* Checksum header offset is in 4-byte units */
- checksum_offset = sizeof(struct rmnet_map_header);
- checksum_offset /= sizeof(u32);
- val |= u32_encode_bits(checksum_offset,
- CS_METADATA_HDR_OFFSET_FMASK);
+ off = sizeof(struct rmnet_map_header) / sizeof(u32);
+ val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
enabled = version < IPA_VERSION_4_5
? IPA_CS_OFFLOAD_UL
@@ -459,24 +526,26 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
} else {
enabled = IPA_CS_OFFLOAD_NONE;
}
- val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
+ val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
/* CS_GEN_QMB_MASTER_SEL is 0 */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
- u32 offset;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val;
if (!endpoint->toward_ipa)
return;
- offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
- val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);
+ reg = ipa_reg(ipa, ENDP_INIT_NAT);
+ val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static u32
@@ -485,7 +554,7 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
u32 header_size = sizeof(struct rmnet_map_header);
/* Without checksum offload, we just have the MAP header */
- if (!endpoint->data->checksum)
+ if (!endpoint->config.checksum)
return header_size;
if (version < IPA_VERSION_4_5) {
@@ -500,6 +569,50 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
return header_size;
}
+/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
+static u32 ipa_header_size_encode(enum ipa_version version,
+ const struct ipa_reg *reg, u32 header_size)
+{
+ u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(header_size > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ header_size >>= hweight32(field_max);
+ WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
+ val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
+
+ return val;
+}
+
+/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
+static u32 ipa_metadata_offset_encode(enum ipa_version version,
+ const struct ipa_reg *reg, u32 offset)
+{
+ u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
+ u32 val;
+
+ /* We know field_max can be used as a mask (2^n - 1) */
+ val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
+ if (version < IPA_VERSION_4_5) {
+ WARN_ON(offset > field_max);
+ return val;
+ }
+
+ /* IPA v4.5 adds a few more most-significant bits */
+ offset >>= hweight32(field_max);
+ WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
+ val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
+
+ return val;
+}
+
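
Both encode helpers follow the same low/high split. A hypothetical 8-bit value 0x95 against a 6-bit low field (mask 0x3f, so field_max doubles as a mask) would land as:

	lo = 0x95 & 0x3f;             /* 0x15, encoded in the HDR_LEN field */
	hi = 0x95 >> hweight32(0x3f); /* 0x02, encoded in HDR_LEN_MSB (v4.5+) */

The field widths here are illustrative; the real widths come from the per-version ipa_reg definitions.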
/**
* ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
* @endpoint: Endpoint pointer
@@ -523,36 +636,38 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
*/
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- if (endpoint->data->qmap) {
+ reg = ipa_reg(ipa, ENDP_INIT_HDR);
+ if (endpoint->config.qmap) {
enum ipa_version version = ipa->version;
size_t header_size;
header_size = ipa_qmap_header_size(version, endpoint);
- val = ipa_header_size_encoded(version, header_size);
+ val = ipa_header_size_encode(version, reg, header_size);
/* Define how to fill fields in a received QMAP header */
if (!endpoint->toward_ipa) {
- u32 offset; /* Field offset within header */
+ u32 off; /* Field offset within header */
/* Where IPA will write the metadata value */
- offset = offsetof(struct rmnet_map_header, mux_id);
- val |= ipa_metadata_offset_encoded(version, offset);
+ off = offsetof(struct rmnet_map_header, mux_id);
+ val |= ipa_metadata_offset_encode(version, reg, off);
/* Where IPA will write the length */
- offset = offsetof(struct rmnet_map_header, pkt_len);
+ off = offsetof(struct rmnet_map_header, pkt_len);
/* Upper bits are stored in HDR_EXT with IPA v4.5 */
if (version >= IPA_VERSION_4_5)
- offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
+ off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
- val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
- val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
+ val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
+ val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
}
/* For QMAP TX, metadata offset is 0 (modem assumes this) */
- val |= HDR_OFST_METADATA_VALID_FMASK;
+ val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
/* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
/* HDR_A5_MUX is 0 */
@@ -560,228 +675,211 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
/* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
- u32 pad_align = endpoint->data->rx.pad_align;
+ u32 pad_align = endpoint->config.rx.pad_align;
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- val |= HDR_ENDIANNESS_FMASK; /* big endian */
-
- /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet
- * driver assumes this field is meaningful in packets it receives,
- * and assumes the header's payload length includes that padding.
- * The RMNet driver does *not* pad packets it sends, however, so
- * the pad field (although 0) should be ignored.
- */
- if (endpoint->data->qmap && !endpoint->toward_ipa) {
- val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
- /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
- val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
- /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
+ if (endpoint->config.qmap) {
+ /* We have a header, so we must specify its endianness */
+ val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
+
+ /* A QMAP header contains a 6 bit pad field at offset 0.
+ * The RMNet driver assumes this field is meaningful in
+ * packets it receives, and assumes the header's payload
+ * length includes that padding. The RMNet driver does
+ * *not* pad packets it sends, however, so the pad field
+ * (although 0) should be ignored.
+ */
+ if (!endpoint->toward_ipa) {
+ val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
+ /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
+ val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
+ /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
+ }
}
/* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
if (!endpoint->toward_ipa)
- val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
+ val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
/* IPA v4.5 adds some most-significant bits to a few fields,
* two of which are defined in the HDR (not HDR_EXT) register.
*/
if (ipa->version >= IPA_VERSION_4_5) {
/* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
- if (endpoint->data->qmap && !endpoint->toward_ipa) {
- u32 offset;
-
- offset = offsetof(struct rmnet_map_header, pkt_len);
- offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
- val |= u32_encode_bits(offset,
- HDR_OFST_PKT_SIZE_MSB_FMASK);
+ if (endpoint->config.qmap && !endpoint->toward_ipa) {
+ u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
+ u32 off; /* Field offset within header */
+
+ off = offsetof(struct rmnet_map_header, pkt_len);
+ /* Low bits are in the ENDP_INIT_HDR register */
+ off >>= hweight32(mask);
+ val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
/* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
}
}
- iowrite32(val, ipa->reg_virt + offset);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
u32 offset;
if (endpoint->toward_ipa)
return; /* Register not valid for TX endpoints */
- offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
/* Note that HDR_ENDIANNESS indicates big endian header fields */
- if (endpoint->data->qmap)
+ if (endpoint->config.qmap)
val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + offset);
}
static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
- if (endpoint->data->dma_mode) {
- enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
- u32 dma_endpoint_id;
+ reg = ipa_reg(ipa, ENDP_INIT_MODE);
+ if (endpoint->config.dma_mode) {
+ enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
+ u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
- dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
-
- val = u32_encode_bits(IPA_DMA, MODE_FMASK);
- val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
+ val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
+ val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
} else {
- val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
+ val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
}
/* All other bits unspecified (and 0) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
+ iowrite32(val, ipa->reg_virt + offset);
}
-/* Compute the aggregation size value to use for a given buffer size */
-static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
+/* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
+ * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
+ * they're configured to have granularity 100 usec and 1 msec, respectively.
+ *
+ * The return value is the positive or negative Qtime value to use to
+ * express the (microsecond) time provided. A positive return value
+ * means pulse generator 0 can be used; otherwise use pulse generator 1.
+ */
+static int ipa_qtime_val(u32 microseconds, u32 max)
{
- /* We don't use "hard byte limit" aggregation, so we define the
- * aggregation limit such that our buffer has enough space *after*
- * that limit to receive a full MTU of data, plus overhead.
- */
- rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
+ u32 val;
- return rx_buffer_size / SZ_1K;
-}
+ /* Use 100 microsecond granularity if possible */
+ val = DIV_ROUND_CLOSEST(microseconds, 100);
+ if (val <= max)
+ return (int)val;
-/* Encoded values for AGGR endpoint register fields */
-static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
-{
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
+ /* Have to use pulse generator 1 (millisecond granularity) */
+ val = DIV_ROUND_CLOSEST(microseconds, 1000);
+ WARN_ON(val > max);
- return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
+ return (int)-val;
}
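
For instance, with a hypothetical 5-bit limit field (max = 31):

	ipa_qtime_val(500, 31);   /* returns  5: 5 * 100 usec on pulse generator 0 */
	ipa_qtime_val(5000, 31);  /* returns -5: DIV_ROUND_CLOSEST(5000, 100) = 50
				   * overflows the field, so 5 * 1 msec on
				   * pulse generator 1 is used instead
				   */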
/* Encode the aggregation timer limit (microseconds) based on IPA version */
-static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
+static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
+ u32 microseconds)
{
- u32 gran_sel;
- u32 fmask;
+ u32 max;
u32 val;
- if (version < IPA_VERSION_4_5) {
- /* We set aggregation granularity in ipa_hardware_config() */
- limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
+ if (!microseconds)
+ return 0; /* Nothing to compute if time limit is 0 */
- return u32_encode_bits(limit, aggr_time_limit_fmask(true));
- }
+ max = ipa_reg_field_max(reg, TIME_LIMIT);
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 gran_sel;
+ int ret;
+
+ /* Compute the Qtime limit value to use */
+ ret = ipa_qtime_val(microseconds, max);
+ if (ret < 0) {
+ val = -ret;
+ gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
+ } else {
+ val = ret;
+ gran_sel = 0;
+ }
- /* IPA v4.5 expresses the time limit using Qtime. The AP has
- * pulse generators 0 and 1 available, which were configured
- * in ipa_qtime_config() to have granularity 100 usec and
- * 1 msec, respectively. Use pulse generator 0 if possible,
- * otherwise fall back to pulse generator 1.
- */
- fmask = aggr_time_limit_fmask(false);
- val = DIV_ROUND_CLOSEST(limit, 100);
- if (val > field_max(fmask)) {
- /* Have to use pulse generator 1 (millisecond granularity) */
- gran_sel = AGGR_GRAN_SEL_FMASK;
- val = DIV_ROUND_CLOSEST(limit, 1000);
- } else {
- /* We can use pulse generator 0 (100 usec granularity) */
- gran_sel = 0;
+ return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
}
- return gran_sel | u32_encode_bits(val, fmask);
-}
+ /* We program aggregation granularity in ipa_hardware_config() */
+ val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
+ WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
+ microseconds, max * IPA_AGGR_GRANULARITY);
-static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
-{
- u32 val = enabled ? 1 : 0;
-
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
-
- return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
+ return ipa_reg_encode(reg, TIME_LIMIT, val);
}
static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
- enum ipa_version version = endpoint->ipa->version;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- if (endpoint->data->aggregation) {
+ reg = ipa_reg(ipa, ENDP_INIT_AGGR);
+ if (endpoint->config.aggregation) {
if (!endpoint->toward_ipa) {
- bool close_eof;
+ const struct ipa_endpoint_rx *rx_config;
+ u32 buffer_size;
u32 limit;
- val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
- val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
+ rx_config = &endpoint->config.rx;
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
+ val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
- limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
- val |= aggr_byte_limit_encoded(version, limit);
+ buffer_size = rx_config->buffer_size;
+ limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
+ rx_config->aggr_hard_limit);
+ val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
- limit = IPA_AGGR_TIME_LIMIT;
- val |= aggr_time_limit_encoded(version, limit);
+ limit = rx_config->aggr_time_limit;
+ val |= aggr_time_limit_encode(ipa, reg, limit);
/* AGGR_PKT_LIMIT is 0 (unlimited) */
- close_eof = endpoint->data->rx.aggr_close_eof;
- val |= aggr_sw_eof_active_encoded(version, close_eof);
-
- /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
+ if (rx_config->aggr_close_eof)
+ val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
} else {
- val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
- AGGR_EN_FMASK);
- val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
+ val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
/* other fields ignored */
}
/* AGGR_FORCE_CLOSE is 0 */
/* AGGR_GRAN_SEL is 0 for IPA v4.5 */
} else {
- val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
+ val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
/* other fields ignored */
}
- iowrite32(val, endpoint->ipa->reg_virt + offset);
-}
-
-/* Return the Qtime-based head-of-line blocking timer value that
- * represents the given number of microseconds. The result
- * includes both the timer value and the selected timer granularity.
- */
-static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
-{
- u32 gran_sel;
- u32 val;
-
- /* IPA v4.5 expresses time limits using Qtime. The AP has
- * pulse generators 0 and 1 available, which were configured
- * in ipa_qtime_config() to have granularity 100 usec and
- * 1 msec, respectively. Use pulse generator 0 if possible,
- * otherwise fall back to pulse generator 1.
- */
- val = DIV_ROUND_CLOSEST(microseconds, 100);
- if (val > field_max(TIME_LIMIT_FMASK)) {
- /* Have to use pulse generator 1 (millisecond granularity) */
- gran_sel = GRAN_SEL_FMASK;
- val = DIV_ROUND_CLOSEST(microseconds, 1000);
- } else {
- /* We can use pulse generator 0 (100 usec granularity) */
- gran_sel = 0;
- }
-
- return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/* The head-of-line blocking timer is defined as a tick count. For
@@ -789,12 +887,11 @@ static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
* derived from the 19.2 MHz SoC XO clock. For older IPA versions
* each tick represents 128 cycles of the IPA core clock.
*
- * Return the encoded value that should be written to that register
- * that represents the timeout period provided. For IPA v4.2 this
- * encodes a base and scale value, while for earlier versions the
- * value is a simple tick count.
+ * Return the encoded value representing the timeout period provided
+ * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register.
*/
-static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
+static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
+ u32 microseconds)
{
u32 width;
u32 scale;
@@ -806,18 +903,34 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
if (!microseconds)
return 0; /* Nothing to compute if timer period is 0 */
- if (ipa->version >= IPA_VERSION_4_5)
- return hol_block_timer_qtime_val(ipa, microseconds);
+ if (ipa->version >= IPA_VERSION_4_5) {
+ u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
+ u32 gran_sel;
+ int ret;
+
+ /* Compute the Qtime limit value to use */
+ ret = ipa_qtime_val(microseconds, max);
+ if (ret < 0) {
+ val = -ret;
+ gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
+ } else {
+ val = ret;
+ gran_sel = 0;
+ }
+
+ return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
+ }
- /* Use 64 bit arithmetic to avoid overflow... */
+ /* Use 64 bit arithmetic to avoid overflow */
rate = ipa_core_clock_rate(ipa);
ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
- /* ...but we still need to fit into a 32-bit register */
- WARN_ON(ticks > U32_MAX);
+
+ /* We still need the result to fit into the field */
+ WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
/* IPA v3.5.1 through v4.1 just record the tick count */
if (ipa->version < IPA_VERSION_4_2)
- return (u32)ticks;
+ return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
/* For IPA v4.2, the tick count is represented by base and
* scale fields within the 32-bit timer register, where:
@@ -827,8 +940,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
* count, and extract the number of bits in the base field
* such that high bit is included.
*/
- high = fls(ticks); /* 1..32 */
- width = HWEIGHT32(BASE_VALUE_FMASK);
+ high = fls(ticks); /* 1..32 (or warning above) */
+ width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
scale = high > width ? high - width : 0;
if (scale) {
/* If we're scaling, round up to get a closer result */
@@ -838,8 +951,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
scale++;
}
- val = u32_encode_bits(scale, SCALE_FMASK);
- val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
+ val = ipa_reg_encode(reg, TIMER_SCALE, scale);
+ val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
return val;
}
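
A worked example, assuming the 19.2 MHz XO, a 1000 microsecond timeout, and a 5-bit TIMER_BASE_VALUE field (the field width is illustrative):

	ticks = DIV_ROUND_CLOSEST(1000 * 19200000ULL, 128 * USEC_PER_SEC);  /* 150 */

Before IPA v4.2 the value 150 is programmed directly. For IPA v4.2, high = fls(150) = 8, so scale = 8 - 5 = 3; the rounding step adds 1 << 2, giving 154, and base = 154 >> 3 = 19. The hardware then counts 19 << 3 = 152 ticks, within about 1% of the requested period.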
@@ -850,28 +963,47 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
/* This should only be changed when HOL_BLOCK_EN is disabled */
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
- val = hol_block_timer_val(ipa, microseconds);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
+ val = hol_block_timer_encode(ipa, reg, microseconds);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void
-ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
+ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- val = enable ? HOL_BLOCK_EN_FMASK : 0;
- offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
+ val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
+
+ iowrite32(val, ipa->reg_virt + offset);
+
/* When enabling, the register must be written twice for IPA v4.5+ */
- if (enable && endpoint->ipa->version >= IPA_VERSION_4_5)
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ if (enable && ipa->version >= IPA_VERSION_4_5)
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* Assumes HOL_BLOCK is in disabled state */
+static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
+ u32 microseconds)
+{
+ ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
+ ipa_endpoint_init_hol_block_en(endpoint, true);
+}
+
+static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
+{
+ ipa_endpoint_init_hol_block_en(endpoint, false);
}
void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
@@ -884,54 +1016,65 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
continue;
- ipa_endpoint_init_hol_block_enable(endpoint, false);
- ipa_endpoint_init_hol_block_timer(endpoint, 0);
- ipa_endpoint_init_hol_block_enable(endpoint, true);
+ ipa_endpoint_init_hol_block_disable(endpoint);
+ ipa_endpoint_init_hol_block_enable(endpoint, 0);
}
}
static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
/* DEAGGR_HDR_LEN is 0 */
/* PACKET_OFFSET_VALID is 0 */
/* PACKET_OFFSET_LOCATION is ignored (not valid) */
/* MAX_PACKET_LEN is 0 (not enforced) */
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
+ u32 resource_group = endpoint->config.resource_group;
+ u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val;
- val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
+ val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
- u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
- u32 val = 0;
+ u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
+ u32 val;
if (!endpoint->toward_ipa)
return; /* Register not valid for RX endpoints */
+ reg = ipa_reg(ipa, ENDP_INIT_SEQ);
+
/* Low-order byte configures primary packet processing */
- val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);
+ val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
- /* Second byte configures replicated packet processing */
- val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
- SEQ_REP_TYPE_FMASK);
+ /* Second byte (if supported) configures replicated packet processing */
+ if (ipa->version < IPA_VERSION_4_5)
+ val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
+ endpoint->config.tx.seq_rep_type);
- iowrite32(val, endpoint->ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
/**
@@ -952,7 +1095,7 @@ int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
* If not, see if we can linearize it before giving up.
*/
nr_frags = skb_shinfo(skb)->nr_frags;
- if (1 + nr_frags > endpoint->trans_tre_max) {
+ if (nr_frags > endpoint->skb_frag_max) {
if (skb_linearize(skb))
return -E2BIG;
nr_frags = 0;
@@ -981,149 +1124,123 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 val = 0;
- u32 offset;
- offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
-
- if (endpoint->data->status_enable) {
- val |= STATUS_EN_FMASK;
+ reg = ipa_reg(ipa, ENDP_STATUS);
+ if (endpoint->config.status_enable) {
+ val |= ipa_reg_bit(reg, STATUS_EN);
if (endpoint->toward_ipa) {
enum ipa_endpoint_name name;
u32 status_endpoint_id;
- name = endpoint->data->tx.status_endpoint;
+ name = endpoint->config.tx.status_endpoint;
status_endpoint_id = ipa->name_map[name]->endpoint_id;
- val |= u32_encode_bits(status_endpoint_id,
- STATUS_ENDP_FMASK);
+ val |= ipa_reg_encode(reg, STATUS_ENDP,
+ status_endpoint_id);
}
/* STATUS_LOCATION is 0, meaning status element precedes
- * packet (not present for IPA v4.5)
+ * packet (not present for IPA v4.5+)
*/
- /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
+ /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
-static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
+static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
{
- struct gsi_trans *trans;
- bool doorbell = false;
struct page *page;
+ u32 buffer_size;
u32 offset;
u32 len;
int ret;
- page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
+ buffer_size = endpoint->config.rx.buffer_size;
+ page = dev_alloc_pages(get_order(buffer_size));
if (!page)
return -ENOMEM;
- trans = ipa_endpoint_trans_alloc(endpoint, 1);
- if (!trans)
- goto err_free_pages;
-
/* Offset the buffer to make space for skb headroom */
offset = NET_SKB_PAD;
- len = IPA_RX_BUFFER_SIZE - offset;
+ len = buffer_size - offset;
ret = gsi_trans_page_add(trans, page, len, offset);
if (ret)
- goto err_trans_free;
- trans->data = page; /* transaction owns page now */
-
- if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
- doorbell = true;
- endpoint->replenish_ready = 0;
- }
-
- gsi_trans_commit(trans, doorbell);
-
- return 0;
-
-err_trans_free:
- gsi_trans_free(trans);
-err_free_pages:
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ put_page(page);
+ else
+ trans->data = page; /* transaction owns page now */
- return -ENOMEM;
+ return ret;
}
/**
* ipa_endpoint_replenish() - Replenish endpoint receive buffers
* @endpoint: Endpoint to be replenished
- * @add_one: Whether this is replacing a just-consumed buffer
*
* The IPA hardware can hold a fixed number of receive buffers for an RX
* endpoint, based on the number of entries in the underlying channel ring
* buffer. If an endpoint's "backlog" is non-zero, it indicates how many
* more receive buffers can be supplied to the hardware. Replenishing for
- * an endpoint can be disabled, in which case requests to replenish a
- * buffer are "saved", and transferred to the backlog once it is re-enabled
- * again.
+ * an endpoint can be disabled, in which case buffers are not queued to
+ * the hardware.
*/
-static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
+static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
{
- struct gsi *gsi;
- u32 backlog;
+ struct gsi_trans *trans;
- if (!endpoint->replenish_enabled) {
- if (add_one)
- atomic_inc(&endpoint->replenish_saved);
+ if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
return;
- }
- while (atomic_dec_not_zero(&endpoint->replenish_backlog))
- if (ipa_endpoint_replenish_one(endpoint))
+ /* Skip it if it's already active */
+ if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
+ return;
+
+ while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
+ bool doorbell;
+
+ if (ipa_endpoint_replenish_one(endpoint, trans))
goto try_again_later;
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+
+ /* Ring the doorbell if we've got a full batch */
+ doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
+ gsi_trans_commit(trans, doorbell);
+ }
+
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
return;
try_again_later:
- /* The last one didn't succeed, so fix the backlog */
- backlog = atomic_inc_return(&endpoint->replenish_backlog);
-
- if (add_one)
- atomic_inc(&endpoint->replenish_backlog);
+ gsi_trans_free(trans);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
/* Whenever a receive buffer transaction completes we'll try to
* replenish again. It's unlikely, but if we fail to supply even
* one buffer, nothing will trigger another replenish attempt.
- * Receive buffer transactions use one TRE, so schedule work to
- * try replenishing again if our backlog is *all* available TREs.
+ * If the hardware has no receive buffers queued, schedule work to
+ * try replenishing again.
*/
- gsi = &endpoint->ipa->gsi;
- if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
schedule_delayed_work(&endpoint->replenish_work,
msecs_to_jiffies(1));
}
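
The ENABLED/ACTIVE bit pair replaces the old atomic backlog counters. The ACTIVE bit serves as a lock-free guard: whichever context (transaction completion or the delayed work item) wins the test_and_set_bit() race does the refill, and the loser backs off. Distilled to its essentials, as a sketch rather than the driver code verbatim:

	if (!test_bit(IPA_REPLENISH_ENABLED, flags))
		return;				/* replenishing disabled */
	if (test_and_set_bit(IPA_REPLENISH_ACTIVE, flags))
		return;				/* another context is at work */

	/* ... allocate and commit receive buffer transactions ... */

	clear_bit(IPA_REPLENISH_ACTIVE, flags);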
static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
- struct gsi *gsi = &endpoint->ipa->gsi;
- u32 max_backlog;
- u32 saved;
-
- endpoint->replenish_enabled = true;
- while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
- atomic_add(saved, &endpoint->replenish_backlog);
+ set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
/* Start replenishing if hardware currently has no buffers */
- max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
- if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
- ipa_endpoint_replenish(endpoint, false);
+ if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
+ ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
- u32 backlog;
-
- endpoint->replenish_enabled = false;
- while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
- atomic_add(backlog, &endpoint->replenish_saved);
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
}
static void ipa_endpoint_replenish_work(struct work_struct *work)
@@ -1133,7 +1250,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work)
endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
- ipa_endpoint_replenish(endpoint, false);
+ ipa_endpoint_replenish(endpoint);
}
static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
@@ -1141,32 +1258,33 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
{
struct sk_buff *skb;
+ if (!endpoint->netdev)
+ return;
+
skb = __dev_alloc_skb(len, GFP_ATOMIC);
if (skb) {
+ /* Copy the data into the socket buffer and receive it */
skb_put(skb, len);
memcpy(skb->data, data, len);
skb->truesize += extra;
}
- /* Now receive it, or drop it if there's no netdev */
- if (endpoint->netdev)
- ipa_modem_skb_rx(endpoint->netdev, skb);
- else if (skb)
- dev_kfree_skb_any(skb);
+ ipa_modem_skb_rx(endpoint->netdev, skb);
}
static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
struct page *page, u32 len)
{
+ u32 buffer_size = endpoint->config.rx.buffer_size;
struct sk_buff *skb;
/* Nothing to do if there's no netdev */
if (!endpoint->netdev)
return false;
- WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));
+ WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
- skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
+ skb = build_skb(page_address(page), buffer_size);
if (skb) {
/* Reserve the headroom and account for the data */
skb_reserve(skb, NET_SKB_PAD);
@@ -1264,8 +1382,9 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
struct page *page, u32 total_len)
{
+ u32 buffer_size = endpoint->config.rx.buffer_size;
void *data = page_address(page) + NET_SKB_PAD;
- u32 unused = IPA_RX_BUFFER_SIZE - total_len;
+ u32 unused = buffer_size - total_len;
u32 resid = total_len;
while (resid) {
@@ -1293,10 +1412,10 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
* And if checksum offload is enabled a trailer containing
* computed checksum information will be appended.
*/
- align = endpoint->data->rx.pad_align ? : 1;
+ align = endpoint->config.rx.pad_align ? : 1;
len = le16_to_cpu(status->pkt_len);
len = sizeof(*status) + ALIGN(len, align);
- if (endpoint->data->checksum)
+ if (endpoint->config.checksum)
len += sizeof(struct rmnet_map_dl_csum_trailer);
if (!ipa_endpoint_status_drop(endpoint, status)) {
@@ -1323,38 +1442,25 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
}
}
-/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
-static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
-{
-}
-
-/* Complete transaction initiated in ipa_endpoint_replenish_one() */
-static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
+void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
+ struct gsi_trans *trans)
{
struct page *page;
- ipa_endpoint_replenish(endpoint, true);
+ if (endpoint->toward_ipa)
+ return;
if (trans->cancelled)
- return;
+ goto done;
/* Parse or build a socket buffer using the actual received length */
page = trans->data;
- if (endpoint->data->status_enable)
+ if (endpoint->config.status_enable)
ipa_endpoint_status_parse(endpoint, page, trans->len);
else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
trans->data = NULL; /* Pages have been consumed */
-}
-
-void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
- struct gsi_trans *trans)
-{
- if (endpoint->toward_ipa)
- ipa_endpoint_tx_complete(endpoint, trans);
- else
- ipa_endpoint_rx_complete(endpoint, trans);
+done:
+ ipa_endpoint_replenish(endpoint);
}
void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
@@ -1374,22 +1480,24 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
struct page *page = trans->data;
if (page)
- __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
+ put_page(page);
}
}
void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
{
+ const struct ipa_reg *reg;
u32 val;
+ reg = ipa_reg(ipa, ROUTE);
/* ROUTE_DIS is 0 */
- val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
- val |= ROUTE_DEF_HDR_TABLE_FMASK;
- val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
- val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
- val |= ROUTE_DEF_RETAIN_HDR_FMASK;
+ val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
+ val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
+ /* ROUTE_DEF_HDR_OFST is 0 */
+ val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
+ val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
- iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
void ipa_endpoint_default_route_clear(struct ipa *ipa)
@@ -1505,7 +1613,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
* All other cases just need to reset the underlying GSI channel.
*/
special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
- endpoint->data->aggregation;
+ endpoint->config.aggregation;
if (special && ipa_endpoint_aggr_active(endpoint))
ret = ipa_endpoint_reset_rx_aggr(endpoint);
else
@@ -1519,10 +1627,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
{
- if (endpoint->toward_ipa)
- ipa_endpoint_program_delay(endpoint, false);
- else
+ if (endpoint->toward_ipa) {
+ /* Newer versions of IPA use GSI channel flow control
+ * instead of endpoint DELAY mode to prevent sending data.
+ * Flow control is disabled for newly-allocated channels,
+ * and we can assume flow control is not (ever) enabled
+ * for AP TX channels.
+ */
+ if (endpoint->ipa->version < IPA_VERSION_4_2)
+ ipa_endpoint_program_delay(endpoint, false);
+ } else {
+ /* Ensure suspend mode is off on all AP RX endpoints */
(void)ipa_endpoint_program_suspend(endpoint, false);
+ }
ipa_endpoint_init_cfg(endpoint);
ipa_endpoint_init_nat(endpoint);
ipa_endpoint_init_hdr(endpoint);
@@ -1530,6 +1647,12 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
ipa_endpoint_init_hdr_metadata_mask(endpoint);
ipa_endpoint_init_mode(endpoint);
ipa_endpoint_init_aggr(endpoint);
+ if (!endpoint->toward_ipa) {
+ if (endpoint->config.rx.holb_drop)
+ ipa_endpoint_init_hol_block_enable(endpoint, 0);
+ else
+ ipa_endpoint_init_hol_block_disable(endpoint);
+ }
ipa_endpoint_init_deaggr(endpoint);
ipa_endpoint_init_rsrc_grp(endpoint);
ipa_endpoint_init_seq(endpoint);
@@ -1661,15 +1784,13 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
if (endpoint->ee_id != GSI_EE_AP)
return;
- endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
+ endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
if (!endpoint->toward_ipa) {
/* RX transactions require a single TRE, so the maximum
* backlog is the same as the maximum outstanding TREs.
*/
- endpoint->replenish_enabled = false;
- atomic_set(&endpoint->replenish_saved,
- gsi_channel_tre_max(gsi, endpoint->channel_id));
- atomic_set(&endpoint->replenish_backlog, 0);
+ clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
+ clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
INIT_DELAYED_WORK(&endpoint->replenish_work,
ipa_endpoint_replenish_work);
}
@@ -1720,6 +1841,7 @@ void ipa_endpoint_teardown(struct ipa *ipa)
int ipa_endpoint_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
u32 initialized;
u32 rx_base;
u32 rx_mask;
@@ -1746,11 +1868,12 @@ int ipa_endpoint_config(struct ipa *ipa)
/* Find out about the endpoints supplied by the hardware, and ensure
* the highest one doesn't exceed the number we support.
*/
- val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
+ reg = ipa_reg(ipa, FLAVOR_0);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* Our RX is an IPA producer */
- rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
- max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
+ rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
+ max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val);
if (max > IPA_ENDPOINT_MAX) {
dev_err(dev, "too many endpoints (%u > %u)\n",
max, IPA_ENDPOINT_MAX);
@@ -1759,7 +1882,7 @@ int ipa_endpoint_config(struct ipa *ipa)
rx_mask = GENMASK(max - 1, rx_base);
/* Our TX is an IPA consumer */
- max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
+ max = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
tx_mask = GENMASK(max - 1, 0);
ipa->available = rx_mask | tx_mask;
@@ -1811,7 +1934,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
endpoint->channel_id = data->channel_id;
endpoint->endpoint_id = data->endpoint_id;
endpoint->toward_ipa = data->toward_ipa;
- endpoint->data = &data->endpoint.config;
+ endpoint->config = data->endpoint.config;
ipa->initialized |= BIT(endpoint->endpoint_id);
}
@@ -1845,6 +1968,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
enum ipa_endpoint_name name;
u32 filter_map;
+ BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
+
if (!ipa_endpoint_data_valid(ipa, count, data))
return 0; /* Error */
@@ -1859,6 +1984,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
if (data->endpoint.filter_support)
filter_map |= BIT(data->endpoint_id);
+ if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
+ ipa->modem_tx_count++;
}
if (!ipa_filter_map_valid(ipa, filter_map))
diff --git a/drivers/net/ipa/ipa_endpoint.h b/drivers/net/ipa/ipa_endpoint.h
index 0a859d10312d..d8dfa24f5214 100644
--- a/drivers/net/ipa/ipa_endpoint.h
+++ b/drivers/net/ipa/ipa_endpoint.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_ENDPOINT_H_
#define _IPA_ENDPOINT_H_
@@ -41,20 +41,112 @@ enum ipa_endpoint_name {
#define IPA_ENDPOINT_MAX 32 /* Max supported by driver */
/**
+ * struct ipa_endpoint_tx - Endpoint configuration for TX endpoints
+ * @seq_type: primary packet processing sequencer type
+ * @seq_rep_type: sequencer type for replication processing
+ * @status_endpoint: endpoint to which status elements are sent
+ *
+ * The @status_endpoint is only valid if the endpoint's @status_enable
+ * flag is set.
+ */
+struct ipa_endpoint_tx {
+ enum ipa_seq_type seq_type;
+ enum ipa_seq_rep_type seq_rep_type;
+ enum ipa_endpoint_name status_endpoint;
+};
+
+/**
+ * struct ipa_endpoint_rx - Endpoint configuration for RX endpoints
+ * @buffer_size: requested receive buffer size (bytes)
+ * @pad_align: power-of-2 boundary to which packet payload is aligned
+ * @aggr_time_limit: time before aggregation closes (microseconds)
+ * @aggr_hard_limit: whether aggregation closes before ("hard") or after
+ *	("soft") the byte limit is crossed
+ * @aggr_close_eof: whether aggregation closes on end-of-frame
+ * @holb_drop: whether to drop packets to avoid head-of-line blocking
+ *
+ * The actual size of the receive buffer is rounded up if necessary
+ * to be a power-of-2 number of pages.
+ *
+ * With each packet it transfers, the IPA hardware can perform certain
+ * transformations of its packet data. One of these is adding pad bytes
+ * to the end of the packet data so the result ends on a power-of-2 boundary.
+ *
+ * It is also able to aggregate multiple packets into a single receive buffer.
+ * Aggregation is "open" while a buffer is being filled, and "closes" when
+ * certain criteria are met.
+ *
+ * A time limit can be specified to close aggregation. Aggregation will be
+ * closed if this period passes after data is first written into a receive
+ * buffer. If not specified, no time limit is imposed.
+ *
+ * Insufficient space available in the receive buffer can close aggregation.
+ * The aggregation byte limit defines the point (in units of 1024 bytes) in
+ * the buffer where aggregation closes. With a "soft" aggregation limit,
+ * aggregation closes when a packet written to the buffer *crosses* that
+ * aggregation limit. With a "hard" aggregation limit, aggregation will
+ * close *before* writing a packet that would cross that boundary.
+ */
+struct ipa_endpoint_rx {
+ u32 buffer_size;
+ u32 pad_align;
+ u32 aggr_time_limit;
+ bool aggr_hard_limit;
+ bool aggr_close_eof;
+ bool holb_drop;
+};
+
+/**
+ * struct ipa_endpoint_config - IPA endpoint hardware configuration
+ * @resource_group: resource group to assign endpoint to
+ * @checksum: whether checksum offload is enabled
+ * @qmap: whether endpoint uses QMAP protocol
+ * @aggregation: whether endpoint supports aggregation
+ * @status_enable: whether endpoint uses status elements
+ * @dma_mode: whether endpoint operates in DMA mode
+ * @dma_endpoint: peer endpoint, if operating in DMA mode
+ * @tx: TX-specific endpoint information (see above)
+ * @rx: RX-specific endpoint information (see above)
+ */
+struct ipa_endpoint_config {
+ u32 resource_group;
+ bool checksum;
+ bool qmap;
+ bool aggregation;
+ bool status_enable;
+ bool dma_mode;
+ enum ipa_endpoint_name dma_endpoint;
+ union {
+ struct ipa_endpoint_tx tx;
+ struct ipa_endpoint_rx rx;
+ };
+};
+
+/**
+ * enum ipa_replenish_flag - RX buffer replenish flags

+ *
+ * @IPA_REPLENISH_ENABLED: Whether receive buffer replenishing is enabled
+ * @IPA_REPLENISH_ACTIVE: Whether replenishing is underway
+ * @IPA_REPLENISH_COUNT: Number of defined replenish flags
+ */
+enum ipa_replenish_flag {
+ IPA_REPLENISH_ENABLED,
+ IPA_REPLENISH_ACTIVE,
+ IPA_REPLENISH_COUNT, /* Number of flags (must be last) */
+};
+
+/**
* struct ipa_endpoint - IPA endpoint information
* @ipa: IPA pointer
 * @ee_id: Execution environment endpoint is associated with
* @channel_id: GSI channel used by the endpoint
* @endpoint_id: IPA endpoint number
* @toward_ipa: Endpoint direction (true = TX, false = RX)
- * @data: Endpoint configuration data
- * @trans_tre_max: Maximum number of TRE descriptors per transaction
+ * @config: Default endpoint configuration
+ * @skb_frag_max: Maximum allowed number of TX SKB fragments
* @evt_ring_id: GSI event ring used by the endpoint
* @netdev: Network device pointer, if endpoint uses one
- * @replenish_enabled: Whether receive buffer replenishing is enabled
- * @replenish_ready: Number of replenish transactions without doorbell
- * @replenish_saved: Replenish requests held while disabled
- * @replenish_backlog: Number of buffers needed to fill hardware queue
+ * @replenish_flags: Replenishing state flags
+ * @replenish_count: Total number of replenish transactions committed
* @replenish_work: Work item used for repeated replenish failures
*/
struct ipa_endpoint {
@@ -63,19 +155,17 @@ struct ipa_endpoint {
u32 channel_id;
u32 endpoint_id;
bool toward_ipa;
- const struct ipa_endpoint_config_data *data;
+ struct ipa_endpoint_config config;
- u32 trans_tre_max;
+ u32 skb_frag_max; /* Used for netdev TX only */
u32 evt_ring_id;
/* Net device this endpoint is associated with, if any */
struct net_device *netdev;
/* Receive buffer replenishing for RX endpoints */
- bool replenish_enabled;
- u32 replenish_ready;
- atomic_t replenish_saved;
- atomic_t replenish_backlog;
+ DECLARE_BITMAP(replenish_flags, IPA_REPLENISH_COUNT);
+ u64 replenish_count;
struct delayed_work replenish_work; /* global wq */
};
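A hypothetical ipa_endpoint_rx initializer, showing how the aggregation fields documented above fit together (all values invented for illustration):

	static const struct ipa_endpoint_rx example_rx = {
		.buffer_size	 = 8192,	/* rounded up to a power-of-2 page count */
		.pad_align	 = ilog2(sizeof(u32)),	/* pad payload to 4 bytes */
		.aggr_time_limit = 500,		/* close aggregation after 500 usec */
		.aggr_hard_limit = true,	/* close *before* crossing byte limit */
		.aggr_close_eof	 = false,
		.holb_drop	 = false,
	};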
diff --git a/drivers/net/ipa/ipa_interrupt.c b/drivers/net/ipa/ipa_interrupt.c
index b35170a93b0f..c269432f9c2e 100644
--- a/drivers/net/ipa/ipa_interrupt.c
+++ b/drivers/net/ipa/ipa_interrupt.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
/* DOC: IPA Interrupts
@@ -53,13 +53,15 @@ static void ipa_interrupt_process(struct ipa_interrupt *interrupt, u32 irq_id)
{
bool uc_irq = ipa_interrupt_uc(interrupt, irq_id);
struct ipa *ipa = interrupt->ipa;
+ const struct ipa_reg *reg;
u32 mask = BIT(irq_id);
u32 offset;
/* For microcontroller interrupts, clear the interrupt right away,
* "to avoid clearing unhandled interrupts."
*/
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
if (uc_irq)
iowrite32(mask, ipa->reg_virt + offset);
@@ -80,6 +82,7 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
struct ipa_interrupt *interrupt = dev_id;
struct ipa *ipa = interrupt->ipa;
u32 enabled = interrupt->enabled;
+ const struct ipa_reg *reg;
struct device *dev;
u32 pending;
u32 offset;
@@ -95,7 +98,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
* including conditions whose interrupt is not enabled. Handle
* only the enabled ones.
*/
- offset = ipa_reg_irq_stts_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_STTS);
+ offset = ipa_reg_offset(reg);
pending = ioread32(ipa->reg_virt + offset);
while ((mask = pending & enabled)) {
do {
@@ -112,7 +116,8 @@ static irqreturn_t ipa_isr_thread(int irq, void *dev_id)
if (pending) {
dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n",
pending);
- offset = ipa_reg_irq_clr_offset(ipa->version);
+ reg = ipa_reg(ipa, IPA_IRQ_CLR);
+ offset = ipa_reg_offset(reg);
iowrite32(pending, ipa->reg_virt + offset);
}
out_power_put:
@@ -128,6 +133,7 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
{
struct ipa *ipa = interrupt->ipa;
u32 mask = BIT(endpoint_id);
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
@@ -137,7 +143,8 @@ static void ipa_interrupt_suspend_control(struct ipa_interrupt *interrupt,
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_en_offset(ipa->version);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_EN);
+ offset = ipa_reg_offset(reg);
val = ioread32(ipa->reg_virt + offset);
if (enable)
val |= mask;
@@ -164,18 +171,18 @@ ipa_interrupt_suspend_disable(struct ipa_interrupt *interrupt, u32 endpoint_id)
void ipa_interrupt_suspend_clear_all(struct ipa_interrupt *interrupt)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
- offset = ipa_reg_irq_suspend_info_offset(ipa->version);
- val = ioread32(ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_INFO);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* SUSPEND interrupt status isn't cleared on IPA version 3.0 */
if (ipa->version == IPA_VERSION_3_0)
return;
- offset = ipa_reg_irq_suspend_clr_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IRQ_SUSPEND_CLR);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Simulate arrival of an IPA TX_SUSPEND interrupt */
@@ -189,16 +196,18 @@ void ipa_interrupt_add(struct ipa_interrupt *interrupt,
enum ipa_irq_id ipa_irq, ipa_irq_handler_t handler)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
- WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+ if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
+ return;
interrupt->handler[ipa_irq] = handler;
/* Update the IPA interrupt mask to enable it */
interrupt->enabled |= BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Remove the handler for an IPA interrupt type */
@@ -206,14 +215,16 @@ void
ipa_interrupt_remove(struct ipa_interrupt *interrupt, enum ipa_irq_id ipa_irq)
{
struct ipa *ipa = interrupt->ipa;
- u32 offset;
+ const struct ipa_reg *reg;
- WARN_ON(ipa_irq >= IPA_IRQ_COUNT);
+ if (WARN_ON(ipa_irq >= IPA_IRQ_COUNT))
+ return;
/* Update the IPA interrupt mask to disable it */
interrupt->enabled &= ~BIT(ipa_irq);
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(interrupt->enabled, ipa->reg_virt + offset);
+
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg));
interrupt->handler[ipa_irq] = NULL;
}
@@ -223,8 +234,8 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
struct ipa_interrupt *interrupt;
+ const struct ipa_reg *reg;
unsigned int irq;
- u32 offset;
int ret;
ret = platform_get_irq_byname(ipa->pdev, "ipa");
@@ -242,8 +253,8 @@ struct ipa_interrupt *ipa_interrupt_config(struct ipa *ipa)
interrupt->irq = irq;
/* Start with all IPA interrupts disabled */
- offset = ipa_reg_irq_en_offset(ipa->version);
- iowrite32(0, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IPA_IRQ_EN);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT,
"ipa", interrupt);
diff --git a/drivers/net/ipa/ipa_interrupt.h b/drivers/net/ipa/ipa_interrupt.h
index 231390cea52a..f31fd9965fdc 100644
--- a/drivers/net/ipa/ipa_interrupt.h
+++ b/drivers/net/ipa/ipa_interrupt.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_INTERRUPT_H_
#define _IPA_INTERRUPT_H_
diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c
index a448ec198bee..49537fccf6ad 100644
--- a/drivers/net/ipa/ipa_main.c
+++ b/drivers/net/ipa/ipa_main.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -183,31 +183,97 @@ static void ipa_teardown(struct ipa *ipa)
gsi_teardown(&ipa->gsi);
}
+static void
+ipa_hardware_config_bcr(struct ipa *ipa, const struct ipa_data *data)
+{
+ const struct ipa_reg *reg;
+ u32 val;
+
+ /* IPA v4.5+ has no backward compatibility register */
+ if (ipa->version >= IPA_VERSION_4_5)
+ return;
+
+ reg = ipa_reg(ipa, IPA_BCR);
+ val = data->backward_compat;
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
+static void ipa_hardware_config_tx(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
+ u32 offset;
+ u32 val;
+
+ if (version <= IPA_VERSION_4_0 || version >= IPA_VERSION_4_5)
+ return;
+
+ /* Disable PA mask to allow HOLB drop */
+ reg = ipa_reg(ipa, IPA_TX_CFG);
+ offset = ipa_reg_offset(reg);
+
+ val = ioread32(ipa->reg_virt + offset);
+
+ val &= ~ipa_reg_bit(reg, PA_MASK_EN);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+static void ipa_hardware_config_clkon(struct ipa *ipa)
+{
+ enum ipa_version version = ipa->version;
+ const struct ipa_reg *reg;
+ u32 val;
+
+ if (version >= IPA_VERSION_4_5)
+ return;
+
+ if (version < IPA_VERSION_4_0 && version != IPA_VERSION_3_1)
+ return;
+
+ /* Implement some hardware workarounds */
+ reg = ipa_reg(ipa, CLKON_CFG);
+ if (version == IPA_VERSION_3_1) {
+ /* Disable MISC clock gating */
+ val = ipa_reg_bit(reg, CLKON_MISC);
+ } else { /* IPA v4.0+ */
+ /* Enable open global clocks in the CLKON configuration */
+ val = ipa_reg_bit(reg, CLKON_GLOBAL);
+ val |= ipa_reg_bit(reg, GLOBAL_2X_CLK);
+ }
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
/* Configure bus access behavior for IPA components */
static void ipa_hardware_config_comp(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
/* Nothing to configure prior to IPA v4.0 */
if (ipa->version < IPA_VERSION_4_0)
return;
- val = ioread32(ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ reg = ipa_reg(ipa, COMP_CFG);
+ offset = ipa_reg_offset(reg);
+ val = ioread32(ipa->reg_virt + offset);
if (ipa->version == IPA_VERSION_4_0) {
- val &= ~IPA_QMB_SELECT_CONS_EN_FMASK;
- val &= ~IPA_QMB_SELECT_PROD_EN_FMASK;
- val &= ~IPA_QMB_SELECT_GLOBAL_EN_FMASK;
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_CONS_EN);
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_PROD_EN);
+ val &= ~ipa_reg_bit(reg, IPA_QMB_SELECT_GLOBAL_EN);
} else if (ipa->version < IPA_VERSION_4_5) {
- val |= GSI_MULTI_AXI_MASTERS_DIS_FMASK;
+ val |= ipa_reg_bit(reg, GSI_MULTI_AXI_MASTERS_DIS);
} else {
- /* For IPA v4.5 IPA_FULL_FLUSH_WAIT_RSC_CLOSE_EN is 0 */
+ /* For IPA v4.5 FULL_FLUSH_WAIT_RS_CLOSURE_EN is 0 */
}
- val |= GSI_MULTI_INORDER_RD_DIS_FMASK;
- val |= GSI_MULTI_INORDER_WR_DIS_FMASK;
+ val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_RD_DIS);
+ val |= ipa_reg_bit(reg, GSI_MULTI_INORDER_WR_DIS);
- iowrite32(val, ipa->reg_virt + IPA_REG_COMP_CFG_OFFSET);
+ iowrite32(val, ipa->reg_virt + offset);
}
/* Configure DDR and (possibly) PCIe max read/write QSB values */
@@ -216,6 +282,7 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
{
const struct ipa_qsb_data *data0;
const struct ipa_qsb_data *data1;
+ const struct ipa_reg *reg;
u32 val;
/* QMB 0 represents DDR; QMB 1 (if present) represents PCIe */
@@ -224,25 +291,31 @@ ipa_hardware_config_qsb(struct ipa *ipa, const struct ipa_data *data)
data1 = &data->qsb_data[IPA_QSB_MASTER_PCIE];
/* Max outstanding write accesses for QSB masters */
- val = u32_encode_bits(data0->max_writes, GEN_QMB_0_MAX_WRITES_FMASK);
+ reg = ipa_reg(ipa, QSB_MAX_WRITES);
+
+ val = ipa_reg_encode(reg, GEN_QMB_0_MAX_WRITES, data0->max_writes);
if (data->qsb_count > 1)
- val |= u32_encode_bits(data1->max_writes,
- GEN_QMB_1_MAX_WRITES_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_WRITES_OFFSET);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_WRITES,
+ data1->max_writes);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Max outstanding read accesses for QSB masters */
- val = u32_encode_bits(data0->max_reads, GEN_QMB_0_MAX_READS_FMASK);
+ reg = ipa_reg(ipa, QSB_MAX_READS);
+
+ val = ipa_reg_encode(reg, GEN_QMB_0_MAX_READS, data0->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= u32_encode_bits(data0->max_reads_beats,
- GEN_QMB_0_MAX_READS_BEATS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_0_MAX_READS_BEATS,
+ data0->max_reads_beats);
if (data->qsb_count > 1) {
- val |= u32_encode_bits(data1->max_reads,
- GEN_QMB_1_MAX_READS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS,
+ data1->max_reads);
if (ipa->version >= IPA_VERSION_4_0)
- val |= u32_encode_bits(data1->max_reads_beats,
- GEN_QMB_1_MAX_READS_BEATS_FMASK);
+ val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_READS_BEATS,
+ data1->max_reads_beats);
}
- iowrite32(val, ipa->reg_virt + IPA_REG_QSB_MAX_READS_OFFSET);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* The internal inactivity timer clock is used for the aggregation timer */
@@ -278,48 +351,99 @@ static __always_inline u32 ipa_aggr_granularity_val(u32 usec)
*/
static void ipa_qtime_config(struct ipa *ipa)
{
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
/* Timer clock divider must be disabled when we change the rate */
- iowrite32(0, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
+ reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG);
/* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */
- val = u32_encode_bits(DPL_TIMESTAMP_SHIFT, DPL_TIMESTAMP_LSB_FMASK);
- val |= u32_encode_bits(1, DPL_TIMESTAMP_SEL_FMASK);
+ val = ipa_reg_encode(reg, DPL_TIMESTAMP_LSB, DPL_TIMESTAMP_SHIFT);
+ val |= ipa_reg_bit(reg, DPL_TIMESTAMP_SEL);
/* Configure tag and NAT Qtime timestamp resolution as well */
- val |= u32_encode_bits(TAG_TIMESTAMP_SHIFT, TAG_TIMESTAMP_LSB_FMASK);
- val |= u32_encode_bits(NAT_TIMESTAMP_SHIFT, NAT_TIMESTAMP_LSB_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET);
+ val |= ipa_reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT);
+ val |= ipa_reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Set granularity of pulse generators used for other timers */
- val = u32_encode_bits(IPA_GRAN_100_US, GRAN_0_FMASK);
- val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_1_FMASK);
- val |= u32_encode_bits(IPA_GRAN_1_MS, GRAN_2_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG);
+ val = ipa_reg_encode(reg, PULSE_GRAN_0, IPA_GRAN_100_US);
+ val |= ipa_reg_encode(reg, PULSE_GRAN_1, IPA_GRAN_1_MS);
+ val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
/* Actual divider is 1 more than value supplied here */
- val = u32_encode_bits(IPA_XO_CLOCK_DIVIDER - 1, DIV_VALUE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG);
+ offset = ipa_reg_offset(reg);
+ val = ipa_reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1);
+
+ iowrite32(val, ipa->reg_virt + offset);
/* Divider value is set; re-enable the common timer clock divider */
- val |= u32_encode_bits(1, DIV_ENABLE_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET);
+ val |= ipa_reg_bit(reg, DIV_ENABLE);
+
+ iowrite32(val, ipa->reg_virt + offset);
+}
+
+/* Before IPA v4.5 timing is controlled by a counter register */
+static void ipa_hardware_config_counter(struct ipa *ipa)
+{
+ u32 granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
+ const struct ipa_reg *reg;
+ u32 val;
+
+ reg = ipa_reg(ipa, COUNTER_CFG);
+ /* If defined, EOT_COAL_GRANULARITY is 0 */
+ val = ipa_reg_encode(reg, AGGR_GRANULARITY, granularity);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
+}
+
+static void ipa_hardware_config_timing(struct ipa *ipa)
+{
+ if (ipa->version < IPA_VERSION_4_5)
+ ipa_hardware_config_counter(ipa);
+ else
+ ipa_qtime_config(ipa);
+}
+
+static void ipa_hardware_config_hashing(struct ipa *ipa)
+{
+ const struct ipa_reg *reg;
+
+ if (ipa->version != IPA_VERSION_4_2)
+ return;
+
+ /* IPA v4.2 does not support hashed tables, so disable them */
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_EN);
+
+ /* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH,
+ * IPV4_FILTER_HASH are all zero.
+ */
+ iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
}
static void ipa_idle_indication_cfg(struct ipa *ipa,
u32 enter_idle_debounce_thresh,
bool const_non_idle_enable)
{
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
- val = u32_encode_bits(enter_idle_debounce_thresh,
- ENTER_IDLE_DEBOUNCE_THRESH_FMASK);
+ if (ipa->version < IPA_VERSION_3_5_1)
+ return;
+
+ reg = ipa_reg(ipa, IDLE_INDICATION_CFG);
+ val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH,
+ enter_idle_debounce_thresh);
if (const_non_idle_enable)
- val |= CONST_NON_IDLE_ENABLE_FMASK;
+ val |= ipa_reg_bit(reg, CONST_NON_IDLE_ENABLE);
- offset = ipa_reg_idle_indication_cfg_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/**
@@ -349,55 +473,13 @@ static void ipa_hardware_dcd_deconfig(struct ipa *ipa)
*/
static void ipa_hardware_config(struct ipa *ipa, const struct ipa_data *data)
{
- enum ipa_version version = ipa->version;
- u32 granularity;
- u32 val;
-
- /* IPA v4.5+ has no backward compatibility register */
- if (version < IPA_VERSION_4_5) {
- val = data->backward_compat;
- iowrite32(val, ipa->reg_virt + IPA_REG_BCR_OFFSET);
- }
-
- /* Implement some hardware workarounds */
- if (version >= IPA_VERSION_4_0 && version < IPA_VERSION_4_5) {
- /* Disable PA mask to allow HOLB drop */
- val = ioread32(ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
- val &= ~PA_MASK_EN_FMASK;
- iowrite32(val, ipa->reg_virt + IPA_REG_TX_CFG_OFFSET);
-
- /* Enable open global clocks in the CLKON configuration */
- val = GLOBAL_FMASK | GLOBAL_2X_CLK_FMASK;
- } else if (version == IPA_VERSION_3_1) {
- val = MISC_FMASK; /* Disable MISC clock gating */
- } else {
- val = 0; /* No CLKON configuration needed */
- }
- if (val)
- iowrite32(val, ipa->reg_virt + IPA_REG_CLKON_CFG_OFFSET);
-
+ ipa_hardware_config_bcr(ipa, data);
+ ipa_hardware_config_tx(ipa);
+ ipa_hardware_config_clkon(ipa);
ipa_hardware_config_comp(ipa);
-
- /* Configure system bus limits */
ipa_hardware_config_qsb(ipa, data);
-
- if (version < IPA_VERSION_4_5) {
- /* Configure aggregation timer granularity */
- granularity = ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY);
- val = u32_encode_bits(granularity, AGGR_GRANULARITY_FMASK);
- iowrite32(val, ipa->reg_virt + IPA_REG_COUNTER_CFG_OFFSET);
- } else {
- ipa_qtime_config(ipa);
- }
-
- /* IPA v4.2 does not support hashed tables, so disable them */
- if (version == IPA_VERSION_4_2) {
- u32 offset = ipa_reg_filt_rout_hash_en_offset(version);
-
- iowrite32(0, ipa->reg_virt + offset);
- }
-
- /* Enable dynamic clock division */
+ ipa_hardware_config_timing(ipa);
+ ipa_hardware_config_hashing(ipa);
ipa_hardware_dcd_config(ipa);
}
@@ -612,29 +694,6 @@ static void ipa_validate_build(void)
/* Aggregation granularity value can't be 0, and must fit */
BUILD_BUG_ON(!ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY));
- BUILD_BUG_ON(ipa_aggr_granularity_val(IPA_AGGR_GRANULARITY) >
- field_max(AGGR_GRANULARITY_FMASK));
-}
-
-static bool ipa_version_valid(enum ipa_version version)
-{
- switch (version) {
- case IPA_VERSION_3_0:
- case IPA_VERSION_3_1:
- case IPA_VERSION_3_5:
- case IPA_VERSION_3_5_1:
- case IPA_VERSION_4_0:
- case IPA_VERSION_4_1:
- case IPA_VERSION_4_2:
- case IPA_VERSION_4_5:
- case IPA_VERSION_4_7:
- case IPA_VERSION_4_9:
- case IPA_VERSION_4_11:
- return true;
-
- default:
- return false;
- }
}
/**
@@ -678,8 +737,8 @@ static int ipa_probe(struct platform_device *pdev)
return -ENODEV;
}
- if (!ipa_version_valid(data->version)) {
- dev_err(dev, "invalid IPA version\n");
+ if (!ipa_version_supported(data->version)) {
+ dev_err(dev, "unsupported IPA version %u\n", data->version);
return -EINVAL;
}
@@ -734,7 +793,7 @@ static int ipa_probe(struct platform_device *pdev)
if (ret)
goto err_endpoint_exit;
- ret = ipa_modem_init(ipa, modem_init);
+ ret = ipa_smp2p_init(ipa, modem_init);
if (ret)
goto err_table_exit;
@@ -776,7 +835,7 @@ err_deconfig:
ipa_deconfig(ipa);
err_power_put:
pm_runtime_put_noidle(dev);
- ipa_modem_exit(ipa);
+ ipa_smp2p_exit(ipa);
err_table_exit:
ipa_table_exit(ipa);
err_endpoint_exit:
@@ -827,7 +886,7 @@ static int ipa_remove(struct platform_device *pdev)
ipa_deconfig(ipa);
out_power_put:
pm_runtime_put_noidle(dev);
- ipa_modem_exit(ipa);
+ ipa_smp2p_exit(ipa);
ipa_table_exit(ipa);
ipa_endpoint_exit(ipa);
gsi_exit(&ipa->gsi);
@@ -836,6 +895,8 @@ out_power_put:
kfree(ipa);
ipa_power_exit(power);
+ dev_info(dev, "IPA driver removed\n");
+
return 0;
}
@@ -851,6 +912,7 @@ static void ipa_shutdown(struct platform_device *pdev)
static const struct attribute_group *ipa_attribute_groups[] = {
&ipa_attribute_group,
&ipa_feature_attribute_group,
+ &ipa_endpoint_id_attribute_group,
&ipa_modem_attribute_group,
NULL,
};
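ipa_hardware_config() is now a sequence of small helpers, each guarding itself with the IPA versions it applies to. A new helper would follow the same shape; this one is purely illustrative (the register write is made up):

	static void ipa_hardware_config_example(struct ipa *ipa)
	{
		const struct ipa_reg *reg;

		/* Nothing to do on hardware where the register is absent */
		if (ipa->version >= IPA_VERSION_4_5)
			return;

		reg = ipa_reg(ipa, COUNTER_CFG);	/* defined before v4.5 only */
		iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg));
	}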
diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c
index 4337b0920d3d..f84c6830495a 100644
--- a/drivers/net/ipa/ipa_mem.c
+++ b/drivers/net/ipa/ipa_mem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -75,6 +75,7 @@ ipa_mem_zero_region_add(struct gsi_trans *trans, enum ipa_mem_id mem_id)
int ipa_mem_setup(struct ipa *ipa)
{
dma_addr_t addr = ipa->zero_addr;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
struct gsi_trans *trans;
u32 offset;
@@ -112,8 +113,10 @@ int ipa_mem_setup(struct ipa *ipa)
/* Tell the hardware where the processing context area is located */
mem = ipa_mem_find(ipa, IPA_MEM_MODEM_PROC_CTX);
offset = ipa->mem_offset + mem->offset;
- val = proc_cntxt_base_addr_encoded(ipa->version, offset);
- iowrite32(val, ipa->reg_virt + IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET);
+
+ reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT);
+ val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
return 0;
}
@@ -266,9 +269,7 @@ static bool ipa_mem_valid(struct ipa *ipa, const struct ipa_mem_data *mem_data)
}
/* Now see if any required regions are not defined */
- for (mem_id = find_first_zero_bit(regions, IPA_MEM_COUNT);
- mem_id < IPA_MEM_COUNT;
- mem_id = find_next_zero_bit(regions, IPA_MEM_COUNT, mem_id + 1)) {
+ for_each_clear_bit(mem_id, regions, IPA_MEM_COUNT) {
if (ipa_mem_id_required(ipa, mem_id))
dev_err(dev, "required memory region %u missing\n",
mem_id);
@@ -308,6 +309,7 @@ static bool ipa_mem_size_valid(struct ipa *ipa)
int ipa_mem_config(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_reg *reg;
const struct ipa_mem *mem;
dma_addr_t addr;
u32 mem_size;
@@ -316,12 +318,14 @@ int ipa_mem_config(struct ipa *ipa)
u32 i;
/* Check the advertised location and size of the shared memory area */
- val = ioread32(ipa->reg_virt + IPA_REG_SHARED_MEM_SIZE_OFFSET);
+ reg = ipa_reg(ipa, SHARED_MEM_SIZE);
+ val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
/* The fields in the register are in 8 byte units */
- ipa->mem_offset = 8 * u32_get_bits(val, SHARED_MEM_BADDR_FMASK);
+ ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);
+
/* Make sure the end is within the region's mapped space */
- mem_size = 8 * u32_get_bits(val, SHARED_MEM_SIZE_FMASK);
+ mem_size = 8 * ipa_reg_decode(reg, MEM_SIZE, val);
/* If the sizes don't match, issue a warning */
if (ipa->mem_offset + mem_size < ipa->mem_size) {
@@ -570,7 +574,7 @@ static int ipa_smem_init(struct ipa *ipa, u32 item, size_t size)
}
/* Align the address down and the size up to a page boundary */
- addr = qcom_smem_virt_to_phys(virt) & PAGE_MASK;
+ addr = qcom_smem_virt_to_phys(virt);
phys = addr & PAGE_MASK;
size = PAGE_ALIGN(size + addr - phys);
iova = phys; /* We just want a direct mapping */
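The corrected arithmetic above aligns the physical address down and grows the size to compensate, so the mapping still covers the whole region. With made-up numbers and 4 KiB pages:

	phys_addr_t addr = 0x40001100;		/* qcom_smem_virt_to_phys() result */
	size_t size = 0x2000;			/* requested size */
	phys_addr_t phys = addr & PAGE_MASK;	/* 0x40001000 */

	size = PAGE_ALIGN(size + addr - phys);	/* 0x2100 rounds up to 0x3000 */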
diff --git a/drivers/net/ipa/ipa_modem.c b/drivers/net/ipa/ipa_modem.c
index d0ab4d70c303..423422a2a445 100644
--- a/drivers/net/ipa/ipa_modem.c
+++ b/drivers/net/ipa/ipa_modem.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/errno.h>
@@ -9,6 +9,8 @@
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/if_rmnet.h>
+#include <linux/etherdevice.h>
+#include <net/pkt_sched.h>
#include <linux/pm_runtime.h>
#include <linux/remoteproc/qcom_rproc.h>
@@ -127,7 +129,7 @@ ipa_start_xmit(struct sk_buff *skb, struct net_device *netdev)
goto err_drop_skb;
endpoint = ipa->name_map[IPA_ENDPOINT_AP_MODEM_TX];
- if (endpoint->data->qmap && skb->protocol != htons(ETH_P_MAP))
+ if (endpoint->config.qmap && skb->protocol != htons(ETH_P_MAP))
goto err_drop_skb;
/* The hardware must be powered for us to transmit */
@@ -203,15 +205,20 @@ static const struct net_device_ops ipa_modem_ops = {
static void ipa_modem_netdev_setup(struct net_device *netdev)
{
netdev->netdev_ops = &ipa_modem_ops;
- ether_setup(netdev);
- /* No header ops (override value set by ether_setup()) */
+
netdev->header_ops = NULL;
netdev->type = ARPHRD_RAWIP;
netdev->hard_header_len = 0;
+ netdev->min_header_len = ETH_HLEN;
+ netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = IPA_MTU;
netdev->mtu = netdev->max_mtu;
netdev->addr_len = 0;
+ netdev->tx_queue_len = DEFAULT_TX_QUEUE_LEN;
netdev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+ netdev->priv_flags |= IFF_TX_SKB_SHARING;
+ eth_broadcast_addr(netdev->broadcast);
+
/* The endpoint is configured for QMAP */
netdev->needed_headroom = sizeof(struct rmnet_map_header);
netdev->needed_tailroom = IPA_NETDEV_TAILROOM;
@@ -442,16 +449,6 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
return NOTIFY_OK;
}
-int ipa_modem_init(struct ipa *ipa, bool modem_init)
-{
- return ipa_smp2p_init(ipa, modem_init);
-}
-
-void ipa_modem_exit(struct ipa *ipa)
-{
- ipa_smp2p_exit(ipa);
-}
-
int ipa_modem_config(struct ipa *ipa)
{
void *notifier;
diff --git a/drivers/net/ipa/ipa_modem.h b/drivers/net/ipa/ipa_modem.h
index 5e6e3d234454..d85718db9a57 100644
--- a/drivers/net/ipa/ipa_modem.h
+++ b/drivers/net/ipa/ipa_modem.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_MODEM_H_
#define _IPA_MODEM_H_
@@ -18,9 +18,6 @@ void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb);
void ipa_modem_suspend(struct net_device *netdev);
void ipa_modem_resume(struct net_device *netdev);
-int ipa_modem_init(struct ipa *ipa, bool modem_init);
-void ipa_modem_exit(struct ipa *ipa);
-
int ipa_modem_config(struct ipa *ipa);
void ipa_modem_deconfig(struct ipa *ipa);
diff --git a/drivers/net/ipa/ipa_power.c b/drivers/net/ipa/ipa_power.c
index b1c6c0fcb654..8420f93128a2 100644
--- a/drivers/net/ipa/ipa_power.c
+++ b/drivers/net/ipa/ipa_power.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/clk.h>
@@ -11,6 +11,8 @@
#include <linux/pm_runtime.h>
#include <linux/bitops.h>
+#include "linux/soc/qcom/qcom_aoss.h"
+
#include "ipa.h"
#include "ipa_power.h"
#include "ipa_endpoint.h"
@@ -33,18 +35,6 @@
#define IPA_AUTOSUSPEND_DELAY 500 /* milliseconds */
/**
- * struct ipa_interconnect - IPA interconnect information
- * @path: Interconnect path
- * @average_bandwidth: Average interconnect bandwidth (KB/second)
- * @peak_bandwidth: Peak interconnect bandwidth (KB/second)
- */
-struct ipa_interconnect {
- struct icc_path *path;
- u32 average_bandwidth;
- u32 peak_bandwidth;
-};
-
-/**
* enum ipa_power_flag - IPA power flags
* @IPA_POWER_FLAG_RESUMED: Whether resume from suspend has been signaled
* @IPA_POWER_FLAG_SYSTEM: Hardware is system (not runtime) suspended
@@ -64,6 +54,7 @@ enum ipa_power_flag {
* struct ipa_power - IPA power management information
* @dev: IPA device pointer
* @core: IPA core clock
+ * @qmp: QMP handle for AOSS communication
* @spinlock: Protects modem TX queue enable/disable
* @flags: Boolean state flags
* @interconnect_count: Number of elements in interconnect[]
@@ -72,167 +63,82 @@ enum ipa_power_flag {
struct ipa_power {
struct device *dev;
struct clk *core;
+ struct qmp *qmp;
spinlock_t spinlock; /* used with STOPPED/STARTED power flags */
DECLARE_BITMAP(flags, IPA_POWER_FLAG_COUNT);
u32 interconnect_count;
- struct ipa_interconnect *interconnect;
+ struct icc_bulk_data interconnect[];
};
-static int ipa_interconnect_init_one(struct device *dev,
- struct ipa_interconnect *interconnect,
- const struct ipa_interconnect_data *data)
-{
- struct icc_path *path;
-
- path = of_icc_get(dev, data->name);
- if (IS_ERR(path)) {
- int ret = PTR_ERR(path);
-
- dev_err_probe(dev, ret, "error getting %s interconnect\n",
- data->name);
-
- return ret;
- }
-
- interconnect->path = path;
- interconnect->average_bandwidth = data->average_bandwidth;
- interconnect->peak_bandwidth = data->peak_bandwidth;
-
- return 0;
-}
-
-static void ipa_interconnect_exit_one(struct ipa_interconnect *interconnect)
-{
- icc_put(interconnect->path);
- memset(interconnect, 0, sizeof(*interconnect));
-}
-
/* Initialize interconnects required for IPA operation */
-static int ipa_interconnect_init(struct ipa_power *power, struct device *dev,
+static int ipa_interconnect_init(struct ipa_power *power,
const struct ipa_interconnect_data *data)
{
- struct ipa_interconnect *interconnect;
- u32 count;
- int ret;
-
- count = power->interconnect_count;
- interconnect = kcalloc(count, sizeof(*interconnect), GFP_KERNEL);
- if (!interconnect)
- return -ENOMEM;
- power->interconnect = interconnect;
-
- while (count--) {
- ret = ipa_interconnect_init_one(dev, interconnect, data++);
- if (ret)
- goto out_unwind;
- interconnect++;
- }
-
- return 0;
-
-out_unwind:
- while (interconnect-- > power->interconnect)
- ipa_interconnect_exit_one(interconnect);
- kfree(power->interconnect);
- power->interconnect = NULL;
-
- return ret;
-}
-
-/* Inverse of ipa_interconnect_init() */
-static void ipa_interconnect_exit(struct ipa_power *power)
-{
- struct ipa_interconnect *interconnect;
-
- interconnect = power->interconnect + power->interconnect_count;
- while (interconnect-- > power->interconnect)
- ipa_interconnect_exit_one(interconnect);
- kfree(power->interconnect);
- power->interconnect = NULL;
-}
-
-/* Currently we only use one bandwidth level, so just "enable" interconnects */
-static int ipa_interconnect_enable(struct ipa *ipa)
-{
- struct ipa_interconnect *interconnect;
- struct ipa_power *power = ipa->power;
+ struct icc_bulk_data *interconnect;
int ret;
u32 i;
- interconnect = power->interconnect;
+ /* Initialize our interconnect data array for bulk operations */
+ interconnect = &power->interconnect[0];
for (i = 0; i < power->interconnect_count; i++) {
- ret = icc_set_bw(interconnect->path,
- interconnect->average_bandwidth,
- interconnect->peak_bandwidth);
- if (ret) {
- dev_err(&ipa->pdev->dev,
- "error %d enabling %s interconnect\n",
- ret, icc_get_name(interconnect->path));
- goto out_unwind;
- }
+ /* interconnect->path is filled in by of_icc_bulk_get() */
+ interconnect->name = data->name;
+ interconnect->avg_bw = data->average_bandwidth;
+ interconnect->peak_bw = data->peak_bandwidth;
+ data++;
interconnect++;
}
- return 0;
+ ret = of_icc_bulk_get(power->dev, power->interconnect_count,
+ power->interconnect);
+ if (ret)
+ return ret;
-out_unwind:
- while (interconnect-- > power->interconnect)
- (void)icc_set_bw(interconnect->path, 0, 0);
+ /* All interconnects are initially disabled */
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
+
+ /* Set the bandwidth values to be used when enabled */
+ ret = icc_bulk_set_bw(power->interconnect_count, power->interconnect);
+ if (ret)
+ icc_bulk_put(power->interconnect_count, power->interconnect);
return ret;
}
-/* To disable an interconnect, we just its bandwidth to 0 */
-static int ipa_interconnect_disable(struct ipa *ipa)
+/* Inverse of ipa_interconnect_init() */
+static void ipa_interconnect_exit(struct ipa_power *power)
{
- struct ipa_interconnect *interconnect;
- struct ipa_power *power = ipa->power;
- struct device *dev = &ipa->pdev->dev;
- int result = 0;
- u32 count;
- int ret;
-
- count = power->interconnect_count;
- interconnect = power->interconnect + count;
- while (count--) {
- interconnect--;
- ret = icc_set_bw(interconnect->path, 0, 0);
- if (ret) {
- dev_err(dev, "error %d disabling %s interconnect\n",
- ret, icc_get_name(interconnect->path));
- /* Try to disable all; record only the first error */
- if (!result)
- result = ret;
- }
- }
-
- return result;
+ icc_bulk_put(power->interconnect_count, power->interconnect);
}
/* Enable IPA power, enabling interconnects and the core clock */
static int ipa_power_enable(struct ipa *ipa)
{
+ struct ipa_power *power = ipa->power;
int ret;
- ret = ipa_interconnect_enable(ipa);
+ ret = icc_bulk_enable(power->interconnect_count, power->interconnect);
if (ret)
return ret;
- ret = clk_prepare_enable(ipa->power->core);
+ ret = clk_prepare_enable(power->core);
if (ret) {
- dev_err(&ipa->pdev->dev, "error %d enabling core clock\n", ret);
- (void)ipa_interconnect_disable(ipa);
+ dev_err(power->dev, "error %d enabling core clock\n", ret);
+ icc_bulk_disable(power->interconnect_count,
+ power->interconnect);
}
return ret;
}
/* Inverse of ipa_power_enable() */
-static int ipa_power_disable(struct ipa *ipa)
+static void ipa_power_disable(struct ipa *ipa)
{
- clk_disable_unprepare(ipa->power->core);
+ struct ipa_power *power = ipa->power;
- return ipa_interconnect_disable(ipa);
+ clk_disable_unprepare(power->core);
+
+ icc_bulk_disable(power->interconnect_count, power->interconnect);
}
static int ipa_runtime_suspend(struct device *dev)
@@ -246,7 +152,9 @@ static int ipa_runtime_suspend(struct device *dev)
gsi_suspend(&ipa->gsi);
}
- return ipa_power_disable(ipa);
+ ipa_power_disable(ipa);
+
+ return 0;
}
static int ipa_runtime_resume(struct device *dev)
@@ -382,6 +290,47 @@ void ipa_power_modem_queue_active(struct ipa *ipa)
clear_bit(IPA_POWER_FLAG_STARTED, ipa->power->flags);
}
+static int ipa_power_retention_init(struct ipa_power *power)
+{
+ struct qmp *qmp = qmp_get(power->dev);
+
+ if (IS_ERR(qmp)) {
+ if (PTR_ERR(qmp) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ /* We assume any other error means it's not defined/needed */
+ qmp = NULL;
+ }
+ power->qmp = qmp;
+
+ return 0;
+}
+
+static void ipa_power_retention_exit(struct ipa_power *power)
+{
+ qmp_put(power->qmp);
+ power->qmp = NULL;
+}
+
+/* Control register retention on power collapse */
+void ipa_power_retention(struct ipa *ipa, bool enable)
+{
+ static const char fmt[] = "{ class: bcm, res: ipa_pc, val: %c }";
+ struct ipa_power *power = ipa->power;
+ char buf[36]; /* Exactly enough for fmt[]; size a multiple of 4 */
+ int ret;
+
+ if (!power->qmp)
+ return; /* Not needed on this platform */
+
+ (void)snprintf(buf, sizeof(buf), fmt, enable ? '1' : '0');
+
+ ret = qmp_send(power->qmp, buf, sizeof(buf));
+ if (ret)
+ dev_err(power->dev, "error %d sending QMP %sable request\n",
+ ret, enable ? "en" : "dis");
+}
+
int ipa_power_setup(struct ipa *ipa)
{
int ret;
@@ -408,6 +357,7 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
{
struct ipa_power *power;
struct clk *clk;
+ size_t size;
int ret;
clk = clk_get(dev, "core");
@@ -424,7 +374,8 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
goto err_clk_put;
}
- power = kzalloc(sizeof(*power), GFP_KERNEL);
+ size = struct_size(power, interconnect, data->interconnect_count);
+ power = kzalloc(size, GFP_KERNEL);
if (!power) {
ret = -ENOMEM;
goto err_clk_put;
@@ -434,16 +385,22 @@ ipa_power_init(struct device *dev, const struct ipa_power_data *data)
spin_lock_init(&power->spinlock);
power->interconnect_count = data->interconnect_count;
- ret = ipa_interconnect_init(power, dev, data->interconnect_data);
+ ret = ipa_interconnect_init(power, data->interconnect_data);
if (ret)
goto err_kfree;
+ ret = ipa_power_retention_init(power);
+ if (ret)
+ goto err_interconnect_exit;
+
pm_runtime_set_autosuspend_delay(dev, IPA_AUTOSUSPEND_DELAY);
pm_runtime_use_autosuspend(dev);
pm_runtime_enable(dev);
return power;
+err_interconnect_exit:
+ ipa_interconnect_exit(power);
err_kfree:
kfree(power);
err_clk_put:
@@ -460,6 +417,7 @@ void ipa_power_exit(struct ipa_power *power)
pm_runtime_disable(dev);
pm_runtime_dont_use_autosuspend(dev);
+ ipa_power_retention_exit(power);
ipa_interconnect_exit(power);
kfree(power);
clk_put(clk);
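The conversion above moves the driver onto the interconnect bulk API. A minimal sketch of that lifecycle, assuming an icc_bulk_data array whose name, avg_bw and peak_bw fields are already filled in (error unwinding condensed):

	static int example_icc_bringup(struct device *dev, u32 count,
				       struct icc_bulk_data *interconnect)
	{
		int ret;

		ret = of_icc_bulk_get(dev, count, interconnect); /* fills .path */
		if (ret)
			return ret;

		icc_bulk_disable(count, interconnect);		/* start disabled */

		ret = icc_bulk_set_bw(count, interconnect);	/* program bandwidths */
		if (ret) {
			icc_bulk_put(count, interconnect);
			return ret;
		}

		/* Power up with icc_bulk_enable(); power down with
		 * icc_bulk_disable(); tear down with icc_bulk_put().
		 */
		return icc_bulk_enable(count, interconnect);
	}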
diff --git a/drivers/net/ipa/ipa_power.h b/drivers/net/ipa/ipa_power.h
index 2151805d7fbb..896f052e51a1 100644
--- a/drivers/net/ipa/ipa_power.h
+++ b/drivers/net/ipa/ipa_power.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_POWER_H_
#define _IPA_POWER_H_
@@ -41,6 +41,13 @@ void ipa_power_modem_queue_wake(struct ipa *ipa);
void ipa_power_modem_queue_active(struct ipa *ipa);
/**
+ * ipa_power_retention() - Control register retention on power collapse
+ * @ipa: IPA pointer
+ * @enable: Whether retention should be enabled or disabled
+ */
+void ipa_power_retention(struct ipa *ipa, bool enable);
+
+/**
* ipa_power_setup() - Set up IPA power management
* @ipa: IPA pointer
*
diff --git a/drivers/net/ipa/ipa_qmi.c b/drivers/net/ipa/ipa_qmi.c
index 90f3aec55b36..8295fd4b70d1 100644
--- a/drivers/net/ipa/ipa_qmi.c
+++ b/drivers/net/ipa/ipa_qmi.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -125,7 +125,7 @@ static void ipa_qmi_indication(struct ipa_qmi *ipa_qmi)
*/
static void ipa_qmi_ready(struct ipa_qmi *ipa_qmi)
{
- struct ipa *ipa = container_of(ipa_qmi, struct ipa, qmi);
+ struct ipa *ipa;
int ret;
/* We aren't ready until the modem and microcontroller are */
@@ -308,12 +308,12 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
mem = ipa_mem_find(ipa, IPA_MEM_V4_ROUTE);
req.v4_route_tbl_info_valid = 1;
req.v4_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v4_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE);
req.v6_route_tbl_info_valid = 1;
req.v6_route_tbl_info.start = ipa->mem_offset + mem->offset;
- req.v6_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER);
req.v4_filter_tbl_start_valid = 1;
@@ -352,7 +352,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v4_hash_route_tbl_info_valid = 1;
req.v4_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v4_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v4_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V6_ROUTE_HASHED);
@@ -360,7 +360,7 @@ init_modem_driver_req(struct ipa_qmi *ipa_qmi)
req.v6_hash_route_tbl_info_valid = 1;
req.v6_hash_route_tbl_info.start =
ipa->mem_offset + mem->offset;
- req.v6_hash_route_tbl_info.count = mem->size / sizeof(__le64);
+ req.v6_hash_route_tbl_info.end = IPA_ROUTE_MODEM_COUNT - 1;
}
mem = ipa_mem_find(ipa, IPA_MEM_V4_FILTER_HASHED);
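The QMI change above replaces a (start, count) pair with a (start, end) bounds structure for route tables; end is the last modem route-table *index*, not a byte count. A sketch of the new encoding (values illustrative):

	struct ipa_mem_bounds info = {
		.start = ipa->mem_offset + mem->offset,	/* byte offset */
		.end   = IPA_ROUTE_MODEM_COUNT - 1,	/* last route index */
	};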
diff --git a/drivers/net/ipa/ipa_qmi.h b/drivers/net/ipa/ipa_qmi.h
index 856ef629ccc8..1c236826c17a 100644
--- a/drivers/net/ipa/ipa_qmi.h
+++ b/drivers/net/ipa/ipa_qmi.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_QMI_H_
#define _IPA_QMI_H_
diff --git a/drivers/net/ipa/ipa_qmi_msg.c b/drivers/net/ipa/ipa_qmi_msg.c
index 6838e8065072..97c0befe8d86 100644
--- a/drivers/net/ipa/ipa_qmi_msg.c
+++ b/drivers/net/ipa/ipa_qmi_msg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/stddef.h>
#include <linux/soc/qcom/qmi.h>
@@ -311,7 +311,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x12,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -332,7 +332,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x13,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -496,7 +496,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1b,
.offset = offsetof(struct ipa_init_modem_driver_req,
v4_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
@@ -517,7 +517,7 @@ struct qmi_elem_info ipa_init_modem_driver_req_ei[] = {
.tlv_type = 0x1c,
.offset = offsetof(struct ipa_init_modem_driver_req,
v6_hash_route_tbl_info),
- .ei_array = ipa_mem_array_ei,
+ .ei_array = ipa_mem_bounds_ei,
},
{
.data_type = QMI_OPT_FLAG,
diff --git a/drivers/net/ipa/ipa_qmi_msg.h b/drivers/net/ipa/ipa_qmi_msg.h
index 3233d145fd87..e29663965f43 100644
--- a/drivers/net/ipa/ipa_qmi_msg.h
+++ b/drivers/net/ipa/ipa_qmi_msg.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_QMI_MSG_H_
#define _IPA_QMI_MSG_H_
@@ -86,9 +86,11 @@ enum ipa_platform_type {
IPA_QMI_PLATFORM_TYPE_MSM_QNX_V01 = 0x5, /* QNX MSM */
};
-/* This defines the start and end offset of a range of memory. Both
- * fields are offsets relative to the start of IPA shared memory.
- * The end value is the last addressable byte *within* the range.
+/* This defines the start and end offset of a range of memory. The start
+ * value is a byte offset relative to the start of IPA shared memory. The
+ * end value is the last addressable unit *within* the range. Typically
+ * the end value is in units of bytes; however, it can also be a maximum
+ * array index value.
*/
struct ipa_mem_bounds {
u32 start;
@@ -129,18 +131,19 @@ struct ipa_init_modem_driver_req {
u8 hdr_tbl_info_valid;
struct ipa_mem_bounds hdr_tbl_info;
- /* Routing table information. These define the location and size of
- * non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of non-hashable IPv4 and
+ * IPv6 routing tables. The start values are byte offsets relative
+ * to the start of IPA shared memory.
*/
u8 v4_route_tbl_info_valid;
- struct ipa_mem_array v4_route_tbl_info;
+ struct ipa_mem_bounds v4_route_tbl_info;
u8 v6_route_tbl_info_valid;
- struct ipa_mem_array v6_route_tbl_info;
+ struct ipa_mem_bounds v6_route_tbl_info;
/* Filter table information. These define the location of the
* non-hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_filter_tbl_start_valid;
u32 v4_filter_tbl_start;
@@ -181,18 +184,20 @@ struct ipa_init_modem_driver_req {
u8 zip_tbl_info_valid;
struct ipa_mem_bounds zip_tbl_info;
- /* Routing table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ /* Routing table information. These define the location and maximum
+ * *index* (not byte) for the modem portion of hashable IPv4 and IPv6
+ * routing tables (if supported by hardware). The start values are
+ * byte offsets relative to the start of IPA shared memory.
*/
u8 v4_hash_route_tbl_info_valid;
- struct ipa_mem_array v4_hash_route_tbl_info;
+ struct ipa_mem_bounds v4_hash_route_tbl_info;
u8 v6_hash_route_tbl_info_valid;
- struct ipa_mem_array v6_hash_route_tbl_info;
+ struct ipa_mem_bounds v6_hash_route_tbl_info;
/* Filter table information. These define the location and size
- * of hashable IPv4 and IPv6 filter tables. The start values are
- * offsets relative to the start of IPA shared memory.
+ * of hashable IPv4 and IPv6 filter tables (if supported by hardware).
+ * The start values are byte offsets relative to the start of IPA
+ * shared memory.
*/
u8 v4_hash_filter_tbl_start_valid;
u32 v4_hash_filter_tbl_start;
@@ -214,7 +219,7 @@ struct ipa_init_modem_driver_req {
/* The response to a IPA_QMI_INIT_DRIVER request begins with a standard
* QMI response, but contains other information as well. Currently we
- * simply wait for the the INIT_DRIVER transaction to complete and
+ * simply wait for the INIT_DRIVER transaction to complete and
* ignore any other data that might be returned.
*/
struct ipa_init_modem_driver_rsp {
diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c
index e6147a1cd787..22f067741d9b 100644
--- a/drivers/net/ipa/ipa_reg.c
+++ b/drivers/net/ipa/ipa_reg.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/io.h>
@@ -9,11 +9,105 @@
#include "ipa.h"
#include "ipa_reg.h"
+/* Is this register valid and defined for the current IPA version? */
+static bool ipa_reg_valid(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ enum ipa_version version = ipa->version;
+ bool valid;
+
+ /* Check for bogus (out of range) register IDs */
+ if ((u32)reg_id >= ipa->regs->reg_count)
+ return false;
+
+ switch (reg_id) {
+ case IPA_BCR:
+ case COUNTER_CFG:
+ valid = version < IPA_VERSION_4_5;
+ break;
+
+ case IPA_TX_CFG:
+ case FLAVOR_0:
+ case IDLE_INDICATION_CFG:
+ valid = version >= IPA_VERSION_3_5;
+ break;
+
+ case QTIME_TIMESTAMP_CFG:
+ case TIMERS_XO_CLK_DIV_CFG:
+ case TIMERS_PULSE_GRAN_CFG:
+ valid = version >= IPA_VERSION_4_5;
+ break;
+
+ case SRC_RSRC_GRP_45_RSRC_TYPE:
+ case DST_RSRC_GRP_45_RSRC_TYPE:
+ valid = version <= IPA_VERSION_3_1 ||
+ version == IPA_VERSION_4_5;
+ break;
+
+ case SRC_RSRC_GRP_67_RSRC_TYPE:
+ case DST_RSRC_GRP_67_RSRC_TYPE:
+ valid = version <= IPA_VERSION_3_1;
+ break;
+
+ case ENDP_FILTER_ROUTER_HSH_CFG:
+ valid = version != IPA_VERSION_4_2;
+ break;
+
+ case IRQ_SUSPEND_EN:
+ case IRQ_SUSPEND_CLR:
+ valid = version >= IPA_VERSION_3_1;
+ break;
+
+ default:
+ valid = true; /* Others should be defined for all versions */
+ break;
+ }
+
+ /* To be valid, it must be defined */
+
+ return valid && ipa->regs->reg[reg_id];
+}
+
+const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
+{
+ if (WARN_ON(!ipa_reg_valid(ipa, reg_id)))
+ return NULL;
+
+ return ipa->regs->reg[reg_id];
+}
+
+static const struct ipa_regs *ipa_regs(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ return &ipa_regs_v3_1;
+ case IPA_VERSION_3_5_1:
+ return &ipa_regs_v3_5_1;
+ case IPA_VERSION_4_2:
+ return &ipa_regs_v4_2;
+ case IPA_VERSION_4_5:
+ return &ipa_regs_v4_5;
+ case IPA_VERSION_4_9:
+ return &ipa_regs_v4_9;
+ case IPA_VERSION_4_11:
+ return &ipa_regs_v4_11;
+ default:
+ return NULL;
+ }
+}
+
int ipa_reg_init(struct ipa *ipa)
{
struct device *dev = &ipa->pdev->dev;
+ const struct ipa_regs *regs;
struct resource *res;
+ regs = ipa_regs(ipa->version);
+ if (!regs)
+ return -EINVAL;
+
+ if (WARN_ON(regs->reg_count > IPA_REG_ID_COUNT))
+ return -EINVAL;
+
/* Setup IPA register memory */
res = platform_get_resource_byname(ipa->pdev, IORESOURCE_MEM,
"ipa-reg");
@@ -28,6 +122,7 @@ int ipa_reg_init(struct ipa *ipa)
return -ENOMEM;
}
ipa->reg_addr = res->start;
+ ipa->regs = regs;
return 0;
}
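One consequence of the validity checking added above: requesting a register the running IPA version lacks triggers a WARN and yields NULL, so callers normally check the version before the lookup, as the config helpers in ipa_main.c do. A sketch of the failure mode:

	reg = ipa_reg(ipa, IPA_BCR);	/* WARNs and returns NULL on IPA v4.5+ */
	if (reg)
		iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));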
diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h
index a5b355384d4a..7bf70f70f63f 100644
--- a/drivers/net/ipa/ipa_reg.h
+++ b/drivers/net/ipa/ipa_reg.h
@@ -1,12 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#ifndef _IPA_REG_H_
#define _IPA_REG_H_
#include <linux/bitfield.h>
+#include <linux/bug.h>
#include "ipa_version.h"
@@ -16,304 +17,325 @@ struct ipa;
* DOC: IPA Registers
*
* IPA registers are located within the "ipa-reg" address space defined by
- * Device Tree. The offset of each register within that space is specified
- * by symbols defined below. The address space is mapped to virtual memory
- * space in ipa_mem_init(). All IPA registers are 32 bits wide.
+ * Device Tree. Each register has a specified offset within that space,
+ * and that space is mapped into virtual memory in ipa_mem_init(). Each
+ * register has a unique identifier, taken from the ipa_reg_id enumerated
+ * type. All IPA registers are 32 bits wide.
*
- * Certain register types are duplicated for a number of instances of
- * something. For example, each IPA endpoint has an set of registers
- * defining its configuration. The offset to an endpoint's set of registers
- * is computed based on an "base" offset, plus an endpoint's ID multiplied
- * and a "stride" value for the register. For such registers, the offset is
- * computed by a function-like macro that takes a parameter used in the
- * computation.
+ * Certain "parameterized" register types are duplicated for a number of
+ * instances of something. For example, each IPA endpoint has a set of
+ * registers defining its configuration. The offset to an endpoint's set
+ * of registers is computed from a "base" offset, plus the endpoint's
+ * ID multiplied by a "stride" value for the register. Similarly, some
+ * registers have an offset that depends on execution environment. In
+ * this case, the stride is multiplied by a member of the gsi_ee_id
+ * enumerated type.
*
- * Some register offsets depend on execution environment. For these an "ee"
- * parameter is supplied to the offset macro. The "ee" value is a member of
- * the gsi_ee enumerated type.
+ * Each version of IPA implements an array of ipa_reg structures indexed
+ * by register ID. Each entry in the array specifies the base offset and
+ * (for parameterized registers) a non-zero stride value. Not all versions
+ * of IPA define all registers. The offset for a register is returned by
+ * ipa_reg_offset() when the register's ipa_reg structure is supplied;
+ * zero is returned for an undefined register (this should never happen).
*
- * The offset of a register dependent on endpoint ID is computed by a macro
- * that is supplied a parameter "ep", "txep", or "rxep". A register with an
- * "ep" parameter is valid for any endpoint; a register with a "txep" or
- * "rxep" parameter is valid only for TX or RX endpoints, respectively. The
- * "*ep" value is assumed to be less than the maximum valid endpoint ID
- * for the current hardware, and that will not exceed IPA_ENDPOINT_MAX.
- *
- * The offset of registers related to filter and route tables is computed
- * by a macro that is supplied a parameter "er". The "er" represents an
- * endpoint ID for filters, or a route ID for routes. For filters, the
- * endpoint ID must be less than IPA_ENDPOINT_MAX, but is further restricted
- * because not all endpoints support filtering. For routes, the route ID
- * must be less than IPA_ROUTE_MAX.
- *
- * The offset of registers related to resource types is computed by a macro
- * that is supplied a parameter "rt". The "rt" represents a resource type,
- * which is is a member of the ipa_resource_type_src enumerated type for
- * source endpoint resources or the ipa_resource_type_dst enumerated type
- * for destination endpoint resources.
- *
- * Some registers encode multiple fields within them. For these, each field
- * has a symbol below defining a field mask that encodes both the position
- * and width of the field within its register.
- *
- * In some cases, different versions of IPA hardware use different offset or
- * field mask values. In such cases an inline_function(ipa) is used rather
- * than a MACRO to define the offset or field mask to use.
- *
- * Finally, some registers hold bitmasks representing endpoints. In such
- * cases the @available field in the @ipa structure defines the "full" set
- * of valid bits for the register.
+ * Some registers encode multiple fields within them. Each field in
+ * such a register has a unique identifier (from an enumerated type).
+ * The position and width of the fields in a register are defined by
+ * an array of field masks, indexed by field ID. Two functions are
+ * used to access register fields; both take an ipa_reg structure as
+ * argument. To encode a value to be represented in a register field,
+ * the value and field ID are passed to ipa_reg_encode(). To extract
+ * a value encoded in a register field, the field ID is passed to
+ * ipa_reg_decode(). In addition, for single-bit fields, ipa_reg_bit()
+ * can be used to either encode the bit value, or to generate a mask
+ * used to extract the bit value.
*/
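
For example, reading a register field with this interface follows one consistent pattern; here is a minimal sketch (the wrapper function is illustrative only, not part of the patch — it assumes the SHARED_MEM_SIZE register and MEM_BADDR field declared later in this header):

static u32 ipa_shared_mem_baddr(struct ipa *ipa)
{
	const struct ipa_reg *reg = ipa_reg(ipa, SHARED_MEM_SIZE);
	u32 val;

	/* Read the register, then extract one field from its value */
	val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));

	return ipa_reg_decode(reg, MEM_BADDR, val);
}
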
-#define IPA_REG_COMP_CFG_OFFSET 0x0000003c
-/* The next field is not supported for IPA v4.0+, not present for IPA v4.5+ */
-#define ENABLE_FMASK GENMASK(0, 0)
-/* The next field is present for IPA v4.7+ */
-#define RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS_FMASK GENMASK(0, 0)
-#define GSI_SNOC_BYPASS_DIS_FMASK GENMASK(1, 1)
-#define GEN_QMB_0_SNOC_BYPASS_DIS_FMASK GENMASK(2, 2)
-#define GEN_QMB_1_SNOC_BYPASS_DIS_FMASK GENMASK(3, 3)
-/* The next field is not present for IPA v4.5+ */
-#define IPA_DCMP_FAST_CLK_EN_FMASK GENMASK(4, 4)
-/* The next twelve fields are present for IPA v4.0+ */
-#define IPA_QMB_SELECT_CONS_EN_FMASK GENMASK(5, 5)
-#define IPA_QMB_SELECT_PROD_EN_FMASK GENMASK(6, 6)
-#define GSI_MULTI_INORDER_RD_DIS_FMASK GENMASK(7, 7)
-#define GSI_MULTI_INORDER_WR_DIS_FMASK GENMASK(8, 8)
-#define GEN_QMB_0_MULTI_INORDER_RD_DIS_FMASK GENMASK(9, 9)
-#define GEN_QMB_1_MULTI_INORDER_RD_DIS_FMASK GENMASK(10, 10)
-#define GEN_QMB_0_MULTI_INORDER_WR_DIS_FMASK GENMASK(11, 11)
-#define GEN_QMB_1_MULTI_INORDER_WR_DIS_FMASK GENMASK(12, 12)
-#define GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS_FMASK GENMASK(13, 13)
-#define GSI_SNOC_CNOC_LOOP_PROT_DISABLE_FMASK GENMASK(14, 14)
-#define GSI_MULTI_AXI_MASTERS_DIS_FMASK GENMASK(15, 15)
-#define IPA_QMB_SELECT_GLOBAL_EN_FMASK GENMASK(16, 16)
-/* The next five fields are present for IPA v4.9+ */
-#define QMB_RAM_RD_CACHE_DISABLE_FMASK GENMASK(19, 19)
-#define GENQMB_AOOOWR_FMASK GENMASK(20, 20)
-#define IF_OUT_OF_BUF_STOP_RESET_MASK_EN_FMASK GENMASK(21, 21)
-#define GEN_QMB_1_DYNAMIC_ASIZE_FMASK GENMASK(30, 30)
-#define GEN_QMB_0_DYNAMIC_ASIZE_FMASK GENMASK(31, 31)
-
-/* Encoded value for COMP_CFG register ATOMIC_FETCHER_ARB_LOCK_DIS field */
-static inline u32 arbitration_lock_disable_encoded(enum ipa_version version,
- u32 mask)
-{
- WARN_ON(version < IPA_VERSION_4_0);
+/* enum ipa_reg_id - IPA register IDs */
+enum ipa_reg_id {
+ COMP_CFG,
+ CLKON_CFG,
+ ROUTE,
+ SHARED_MEM_SIZE,
+ QSB_MAX_WRITES,
+ QSB_MAX_READS,
+ FILT_ROUT_HASH_EN,
+ FILT_ROUT_HASH_FLUSH,
+ STATE_AGGR_ACTIVE,
+ IPA_BCR, /* Not IPA v4.5+ */
+ LOCAL_PKT_PROC_CNTXT,
+ AGGR_FORCE_CLOSE,
+ COUNTER_CFG, /* Not IPA v4.5+ */
+ IPA_TX_CFG, /* IPA v3.5+ */
+ FLAVOR_0, /* IPA v3.5+ */
+ IDLE_INDICATION_CFG, /* IPA v3.5+ */
+ QTIME_TIMESTAMP_CFG, /* IPA v4.5+ */
+ TIMERS_XO_CLK_DIV_CFG, /* IPA v4.5+ */
+ TIMERS_PULSE_GRAN_CFG, /* IPA v4.5+ */
+ SRC_RSRC_GRP_01_RSRC_TYPE,
+ SRC_RSRC_GRP_23_RSRC_TYPE,
+ SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
+ SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ DST_RSRC_GRP_01_RSRC_TYPE,
+ DST_RSRC_GRP_23_RSRC_TYPE,
+ DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */
+ DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */
+ ENDP_INIT_CTRL, /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */
+ ENDP_INIT_CFG,
+ ENDP_INIT_NAT, /* TX only */
+ ENDP_INIT_HDR,
+ ENDP_INIT_HDR_EXT,
+ ENDP_INIT_HDR_METADATA_MASK, /* RX only */
+ ENDP_INIT_MODE, /* TX only */
+ ENDP_INIT_AGGR,
+ ENDP_INIT_HOL_BLOCK_EN, /* RX only */
+ ENDP_INIT_HOL_BLOCK_TIMER, /* RX only */
+ ENDP_INIT_DEAGGR, /* TX only */
+ ENDP_INIT_RSRC_GRP,
+ ENDP_INIT_SEQ, /* TX only */
+ ENDP_STATUS,
+ ENDP_FILTER_ROUTER_HSH_CFG, /* Not IPA v4.2 */
+ /* The IRQ registers are only used for GSI_EE_AP */
+ IPA_IRQ_STTS,
+ IPA_IRQ_EN,
+ IPA_IRQ_CLR,
+ IPA_IRQ_UC,
+ IRQ_SUSPEND_INFO,
+ IRQ_SUSPEND_EN, /* IPA v3.1+ */
+ IRQ_SUSPEND_CLR, /* IPA v3.1+ */
+ IPA_REG_ID_COUNT, /* Last; not an ID */
+};
- if (version < IPA_VERSION_4_9)
- return u32_encode_bits(mask, GENMASK(20, 17));
+/**
+ * struct ipa_reg - An IPA register descriptor
+ * @offset: Register offset relative to base of the "ipa-reg" memory
+ * @stride: Distance between two instances, if parameterized
+ * @fcount: Number of entries in the @fmask array
+ * @fmask: Array of mask values defining position and width of fields
+ * @name: Upper-case name of the IPA register
+ */
+struct ipa_reg {
+ u32 offset;
+ u32 stride;
+ u32 fcount;
+ const u32 *fmask; /* BIT(nr) or GENMASK(h, l) */
+ const char *name;
+};
- if (version == IPA_VERSION_4_9)
- return u32_encode_bits(mask, GENMASK(24, 22));
+/* Helper macro for defining "simple" (non-parameterized) registers */
+#define IPA_REG(__NAME, __reg_id, __offset) \
+ IPA_REG_STRIDE(__NAME, __reg_id, __offset, 0)
- return u32_encode_bits(mask, GENMASK(23, 22));
-}
+/* Helper macro for defining parameterized registers, specifying stride */
+#define IPA_REG_STRIDE(__NAME, __reg_id, __offset, __stride) \
+ static const struct ipa_reg ipa_reg_ ## __reg_id = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ }
-/* Encoded value for COMP_CFG register FULL_FLUSH_WAIT_RS_CLOSURE_EN field */
-static inline u32 full_flush_rsc_closure_en_encoded(enum ipa_version version,
- bool enable)
-{
- u32 val = enable ? 1 : 0;
+#define IPA_REG_FIELDS(__NAME, __name, __offset) \
+ IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, 0)
- WARN_ON(version < IPA_VERSION_4_5);
+#define IPA_REG_STRIDE_FIELDS(__NAME, __name, __offset, __stride) \
+ static const struct ipa_reg ipa_reg_ ## __name = { \
+ .name = #__NAME, \
+ .offset = __offset, \
+ .stride = __stride, \
+ .fcount = ARRAY_SIZE(ipa_reg_ ## __name ## _fmask), \
+ .fmask = ipa_reg_ ## __name ## _fmask, \
+ }
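
To make the expansion concrete: the invocation IPA_REG_FIELDS(ROUTE, route, 0x00000048) used by reg/ipa_reg-v3.1.c below produces:

static const struct ipa_reg ipa_reg_route = {
	.name	= "ROUTE",
	.offset	= 0x00000048,
	.stride	= 0,
	.fcount	= ARRAY_SIZE(ipa_reg_route_fmask),
	.fmask	= ipa_reg_route_fmask,
};
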
- if (version == IPA_VERSION_4_5 || version == IPA_VERSION_4_7)
- return u32_encode_bits(val, GENMASK(21, 21));
+/**
+ * struct ipa_regs - Description of registers supported by hardware
+ * @reg_count: Number of registers in the @reg[] array
+ * @reg: Array of register descriptors
+ */
+struct ipa_regs {
+ u32 reg_count;
+ const struct ipa_reg **reg;
+};
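
Each reg/ipa_reg-v*.c file presumably ends by gathering its descriptors into one of these structures; a sketch of the assumed shape (the array name is illustrative, and the tail of the new files is not shown in this diff):

static const struct ipa_reg *ipa_reg_array[] = {
	[COMP_CFG]	= &ipa_reg_comp_cfg,
	[CLKON_CFG]	= &ipa_reg_clkon_cfg,
	/* ... one entry per register this IPA version defines ... */
};

const struct ipa_regs ipa_regs_v3_1 = {
	.reg_count	= ARRAY_SIZE(ipa_reg_array),
	.reg		= ipa_reg_array,
};
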
- return u32_encode_bits(val, GENMASK(17, 17));
-}
+/* COMP_CFG register */
+enum ipa_reg_comp_cfg_field_id {
+ COMP_CFG_ENABLE, /* Not IPA v4.0+ */
+ RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS, /* IPA v4.7+ */
+ GSI_SNOC_BYPASS_DIS,
+ GEN_QMB_0_SNOC_BYPASS_DIS,
+ GEN_QMB_1_SNOC_BYPASS_DIS,
+ IPA_DCMP_FAST_CLK_EN, /* Not IPA v4.5+ */
+ IPA_QMB_SELECT_CONS_EN, /* IPA v4.0+ */
+ IPA_QMB_SELECT_PROD_EN, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GSI_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_RD_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_1_MULTI_INORDER_WR_DIS, /* IPA v4.0+ */
+ GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS, /* IPA v4.0+ */
+ GSI_SNOC_CNOC_LOOP_PROT_DISABLE, /* IPA v4.0+ */
+ GSI_MULTI_AXI_MASTERS_DIS, /* IPA v4.0+ */
+ IPA_QMB_SELECT_GLOBAL_EN, /* IPA v4.0+ */
+ QMB_RAM_RD_CACHE_DISABLE, /* IPA v4.9+ */
+ GENQMB_AOOOWR, /* IPA v4.9+ */
+ IF_OUT_OF_BUF_STOP_RESET_MASK_EN, /* IPA v4.9+ */
+ GEN_QMB_1_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ GEN_QMB_0_DYNAMIC_ASIZE, /* IPA v4.9+ */
+ ATOMIC_FETCHER_ARB_LOCK_DIS, /* IPA v4.0+ */
+ FULL_FLUSH_WAIT_RS_CLOSURE_EN, /* IPA v4.5+ */
+};
-#define IPA_REG_CLKON_CFG_OFFSET 0x00000044
-#define RX_FMASK GENMASK(0, 0)
-#define PROC_FMASK GENMASK(1, 1)
-#define TX_WRAPPER_FMASK GENMASK(2, 2)
-#define MISC_FMASK GENMASK(3, 3)
-#define RAM_ARB_FMASK GENMASK(4, 4)
-#define FTCH_HPS_FMASK GENMASK(5, 5)
-#define FTCH_DPS_FMASK GENMASK(6, 6)
-#define HPS_FMASK GENMASK(7, 7)
-#define DPS_FMASK GENMASK(8, 8)
-#define RX_HPS_CMDQS_FMASK GENMASK(9, 9)
-#define HPS_DPS_CMDQS_FMASK GENMASK(10, 10)
-#define DPS_TX_CMDQS_FMASK GENMASK(11, 11)
-#define RSRC_MNGR_FMASK GENMASK(12, 12)
-#define CTX_HANDLER_FMASK GENMASK(13, 13)
-#define ACK_MNGR_FMASK GENMASK(14, 14)
-#define D_DCPH_FMASK GENMASK(15, 15)
-#define H_DCPH_FMASK GENMASK(16, 16)
-/* The next field is not present for IPA v4.5+ */
-#define DCMP_FMASK GENMASK(17, 17)
-/* The next three fields are present for IPA v3.5+ */
-#define NTF_TX_CMDQS_FMASK GENMASK(18, 18)
-#define TX_0_FMASK GENMASK(19, 19)
-#define TX_1_FMASK GENMASK(20, 20)
-/* The next field is present for IPA v3.5.1+ */
-#define FNR_FMASK GENMASK(21, 21)
-/* The next eight fields are present for IPA v4.0+ */
-#define QSB2AXI_CMDQ_L_FMASK GENMASK(22, 22)
-#define AGGR_WRAPPER_FMASK GENMASK(23, 23)
-#define RAM_SLAVEWAY_FMASK GENMASK(24, 24)
-#define QMB_FMASK GENMASK(25, 25)
-#define WEIGHT_ARB_FMASK GENMASK(26, 26)
-#define GSI_IF_FMASK GENMASK(27, 27)
-#define GLOBAL_FMASK GENMASK(28, 28)
-#define GLOBAL_2X_CLK_FMASK GENMASK(29, 29)
-/* The next field is present for IPA v4.5+ */
-#define DPL_FIFO_FMASK GENMASK(30, 30)
-/* The next field is present for IPA v4.7+ */
-#define DRBIP_FMASK GENMASK(31, 31)
-
-#define IPA_REG_ROUTE_OFFSET 0x00000048
-#define ROUTE_DIS_FMASK GENMASK(0, 0)
-#define ROUTE_DEF_PIPE_FMASK GENMASK(5, 1)
-#define ROUTE_DEF_HDR_TABLE_FMASK GENMASK(6, 6)
-#define ROUTE_DEF_HDR_OFST_FMASK GENMASK(16, 7)
-#define ROUTE_FRAG_DEF_PIPE_FMASK GENMASK(21, 17)
-#define ROUTE_DEF_RETAIN_HDR_FMASK GENMASK(24, 24)
-
-#define IPA_REG_SHARED_MEM_SIZE_OFFSET 0x00000054
-#define SHARED_MEM_SIZE_FMASK GENMASK(15, 0)
-#define SHARED_MEM_BADDR_FMASK GENMASK(31, 16)
-
-#define IPA_REG_QSB_MAX_WRITES_OFFSET 0x00000074
-#define GEN_QMB_0_MAX_WRITES_FMASK GENMASK(3, 0)
-#define GEN_QMB_1_MAX_WRITES_FMASK GENMASK(7, 4)
-
-#define IPA_REG_QSB_MAX_READS_OFFSET 0x00000078
-#define GEN_QMB_0_MAX_READS_FMASK GENMASK(3, 0)
-#define GEN_QMB_1_MAX_READS_FMASK GENMASK(7, 4)
-/* The next two fields are present for IPA v4.0+ */
-#define GEN_QMB_0_MAX_READS_BEATS_FMASK GENMASK(23, 16)
-#define GEN_QMB_1_MAX_READS_BEATS_FMASK GENMASK(31, 24)
-
-static inline u32 ipa_reg_filt_rout_hash_en_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x000008c;
+/* CLKON_CFG register */
+enum ipa_reg_clkon_cfg_field_id {
+ CLKON_RX,
+ CLKON_PROC,
+ TX_WRAPPER,
+ CLKON_MISC,
+ RAM_ARB,
+ FTCH_HPS,
+ FTCH_DPS,
+ CLKON_HPS,
+ CLKON_DPS,
+ RX_HPS_CMDQS,
+ HPS_DPS_CMDQS,
+ DPS_TX_CMDQS,
+ RSRC_MNGR,
+ CTX_HANDLER,
+ ACK_MNGR,
+ D_DCPH,
+ H_DCPH,
+	CLKON_DCMP,				/* Not IPA v4.5+ */
+ NTF_TX_CMDQS, /* IPA v3.5+ */
+ CLKON_TX_0, /* IPA v3.5+ */
+ CLKON_TX_1, /* IPA v3.5+ */
+ CLKON_FNR, /* IPA v3.5.1+ */
+ QSB2AXI_CMDQ_L, /* IPA v4.0+ */
+ AGGR_WRAPPER, /* IPA v4.0+ */
+ RAM_SLAVEWAY, /* IPA v4.0+ */
+ CLKON_QMB, /* IPA v4.0+ */
+ WEIGHT_ARB, /* IPA v4.0+ */
+ GSI_IF, /* IPA v4.0+ */
+ CLKON_GLOBAL, /* IPA v4.0+ */
+ GLOBAL_2X_CLK, /* IPA v4.0+ */
+ DPL_FIFO, /* IPA v4.5+ */
+ DRBIP, /* IPA v4.7+ */
+};
- return 0x0000148;
-}
+/* ROUTE register */
+enum ipa_reg_route_field_id {
+ ROUTE_DIS,
+ ROUTE_DEF_PIPE,
+ ROUTE_DEF_HDR_TABLE,
+ ROUTE_DEF_HDR_OFST,
+ ROUTE_FRAG_DEF_PIPE,
+ ROUTE_DEF_RETAIN_HDR,
+};
-static inline u32 ipa_reg_filt_rout_hash_flush_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x0000090;
+/* SHARED_MEM_SIZE register */
+enum ipa_reg_shared_mem_size_field_id {
+ MEM_SIZE,
+ MEM_BADDR,
+};
- return 0x000014c;
-}
+/* QSB_MAX_WRITES register */
+enum ipa_reg_qsb_max_writes_field_id {
+ GEN_QMB_0_MAX_WRITES,
+ GEN_QMB_1_MAX_WRITES,
+};
-/* The next four fields are used for the hash enable and flush registers */
-#define IPV6_ROUTER_HASH_FMASK GENMASK(0, 0)
-#define IPV6_FILTER_HASH_FMASK GENMASK(4, 4)
-#define IPV4_ROUTER_HASH_FMASK GENMASK(8, 8)
-#define IPV4_FILTER_HASH_FMASK GENMASK(12, 12)
+/* QSB_MAX_READS register */
+enum ipa_reg_qsb_max_reads_field_id {
+ GEN_QMB_0_MAX_READS,
+ GEN_QMB_1_MAX_READS,
+ GEN_QMB_0_MAX_READS_BEATS, /* IPA v4.0+ */
+ GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */
+};
-/* ipa->available defines the valid bits in the STATE_AGGR_ACTIVE register */
-static inline u32 ipa_reg_state_aggr_active_offset(enum ipa_version version)
-{
- if (version < IPA_VERSION_4_0)
- return 0x0000010c;
+/* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */
+enum ipa_reg_rout_hash_field_id {
+ IPV6_ROUTER_HASH,
+ IPV6_FILTER_HASH,
+ IPV4_ROUTER_HASH,
+ IPV4_FILTER_HASH,
+};
- return 0x000000b4;
-}
+/* BCR register */
+enum ipa_bcr_compat {
+ BCR_CMDQ_L_LACK_ONE_ENTRY = 0x0, /* Not IPA v4.2+ */
+ BCR_TX_NOT_USING_BRESP = 0x1, /* Not IPA v4.2+ */
+ BCR_TX_SUSPEND_IRQ_ASSERT_ONCE = 0x2, /* Not IPA v4.0+ */
+ BCR_SUSPEND_L2_IRQ = 0x3, /* Not IPA v4.2+ */
+ BCR_HOLB_DROP_L2_IRQ = 0x4, /* Not IPA v4.2+ */
+ BCR_DUAL_TX = 0x5, /* IPA v3.5+ */
+ BCR_ENABLE_FILTER_DATA_CACHE = 0x6, /* IPA v3.5+ */
+ BCR_NOTIF_PRIORITY_OVER_ZLT = 0x7, /* IPA v3.5+ */
+ BCR_FILTER_PREFETCH_EN = 0x8, /* IPA v3.5+ */
+ BCR_ROUTER_PREFETCH_EN = 0x9, /* IPA v3.5+ */
+};
-/* The next register is not present for IPA v4.5+ */
-#define IPA_REG_BCR_OFFSET 0x000001d0
-/* The next two fields are not present for IPA v4.2+ */
-#define BCR_CMDQ_L_LACK_ONE_ENTRY_FMASK GENMASK(0, 0)
-#define BCR_TX_NOT_USING_BRESP_FMASK GENMASK(1, 1)
-/* The next field is invalid for IPA v4.0+ */
-#define BCR_TX_SUSPEND_IRQ_ASSERT_ONCE_FMASK GENMASK(2, 2)
-/* The next two fields are not present for IPA v4.2+ */
-#define BCR_SUSPEND_L2_IRQ_FMASK GENMASK(3, 3)
-#define BCR_HOLB_DROP_L2_IRQ_FMASK GENMASK(4, 4)
-/* The next five fields are present for IPA v3.5+ */
-#define BCR_DUAL_TX_FMASK GENMASK(5, 5)
-#define BCR_ENABLE_FILTER_DATA_CACHE_FMASK GENMASK(6, 6)
-#define BCR_NOTIF_PRIORITY_OVER_ZLT_FMASK GENMASK(7, 7)
-#define BCR_FILTER_PREFETCH_EN_FMASK GENMASK(8, 8)
-#define BCR_ROUTER_PREFETCH_EN_FMASK GENMASK(9, 9)
-
-/* The value of the next register must be a multiple of 8 (bottom 3 bits 0) */
-#define IPA_REG_LOCAL_PKT_PROC_CNTXT_OFFSET 0x000001e8
-
-/* Encoded value for LOCAL_PKT_PROC_CNTXT register BASE_ADDR field */
-static inline u32 proc_cntxt_base_addr_encoded(enum ipa_version version,
- u32 addr)
-{
- if (version < IPA_VERSION_4_5)
- return u32_encode_bits(addr, GENMASK(16, 0));
+/* LOCAL_PKT_PROC_CNTXT register */
+enum ipa_reg_local_pkt_proc_cntxt_field_id {
+ IPA_BASE_ADDR,
+};
- return u32_encode_bits(addr, GENMASK(17, 0));
-}
+/* COUNTER_CFG register */
+enum ipa_reg_counter_cfg_field_id {
+ EOT_COAL_GRANULARITY, /* Not v3.5+ */
+ AGGR_GRANULARITY,
+};
-/* ipa->available defines the valid bits in the AGGR_FORCE_CLOSE register */
-#define IPA_REG_AGGR_FORCE_CLOSE_OFFSET 0x000001ec
-
-/* The next register is not present for IPA v4.5+ */
-#define IPA_REG_COUNTER_CFG_OFFSET 0x000001f0
-/* The next field is not present for IPA v3.5+ */
-#define EOT_COAL_GRANULARITY GENMASK(3, 0)
-#define AGGR_GRANULARITY_FMASK GENMASK(8, 4)
-
-/* The next register is present for IPA v3.5+ */
-#define IPA_REG_TX_CFG_OFFSET 0x000001fc
-/* The next three fields are not present for IPA v4.0+ */
-#define TX0_PREFETCH_DISABLE_FMASK GENMASK(0, 0)
-#define TX1_PREFETCH_DISABLE_FMASK GENMASK(1, 1)
-#define PREFETCH_ALMOST_EMPTY_SIZE_FMASK GENMASK(4, 2)
-/* The next six fields are present for IPA v4.0+ */
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX0_FMASK GENMASK(5, 2)
-#define DMAW_SCND_OUTSD_PRED_THRESHOLD_FMASK GENMASK(9, 6)
-#define DMAW_SCND_OUTSD_PRED_EN_FMASK GENMASK(10, 10)
-#define DMAW_MAX_BEATS_256_DIS_FMASK GENMASK(11, 11)
-#define PA_MASK_EN_FMASK GENMASK(12, 12)
-#define PREFETCH_ALMOST_EMPTY_SIZE_TX1_FMASK GENMASK(16, 13)
-/* The next field is present for IPA v4.5+ */
-#define DUAL_TX_ENABLE_FMASK GENMASK(17, 17)
-/* The next field is present for IPA v4.2+, but not IPA v4.5 */
-#define SSPND_PA_NO_START_STATE_FMASK GENMASK(18, 18)
-/* The next field is present for IPA v4.2 only */
-#define SSPND_PA_NO_BQ_STATE_FMASK GENMASK(19, 19)
-
-/* The next register is present for IPA v3.5+ */
-#define IPA_REG_FLAVOR_0_OFFSET 0x00000210
-#define IPA_MAX_PIPES_FMASK GENMASK(3, 0)
-#define IPA_MAX_CONS_PIPES_FMASK GENMASK(12, 8)
-#define IPA_MAX_PROD_PIPES_FMASK GENMASK(20, 16)
-#define IPA_PROD_LOWEST_FMASK GENMASK(27, 24)
-
-/* The next register is present for IPA v3.5+ */
-static inline u32 ipa_reg_idle_indication_cfg_offset(enum ipa_version version)
-{
- if (version >= IPA_VERSION_4_2)
- return 0x00000240;
+/* IPA_TX_CFG register */
+enum ipa_reg_ipa_tx_cfg_field_id {
+ TX0_PREFETCH_DISABLE, /* Not v4.0+ */
+ TX1_PREFETCH_DISABLE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE, /* Not v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX0, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_THRESHOLD, /* v4.0+ */
+ DMAW_SCND_OUTSD_PRED_EN, /* v4.0+ */
+ DMAW_MAX_BEATS_256_DIS, /* v4.0+ */
+ PA_MASK_EN, /* v4.0+ */
+ PREFETCH_ALMOST_EMPTY_SIZE_TX1, /* v4.0+ */
+ DUAL_TX_ENABLE, /* v4.5+ */
+	SSPND_PA_NO_START_STATE,		/* v4.2+, not v4.5 */
+ SSPND_PA_NO_BQ_STATE, /* v4.2 only */
+};
- return 0x00000220;
-}
+/* FLAVOR_0 register */
+enum ipa_reg_flavor_0_field_id {
+ MAX_PIPES,
+ MAX_CONS_PIPES,
+ MAX_PROD_PIPES,
+ PROD_LOWEST,
+};
+
+/* IDLE_INDICATION_CFG register */
+enum ipa_reg_idle_indication_cfg_field_id {
+ ENTER_IDLE_DEBOUNCE_THRESH,
+ CONST_NON_IDLE_ENABLE,
+};
+
+/* QTIME_TIMESTAMP_CFG register */
+enum ipa_reg_qtime_timestamp_cfg_field_id {
+ DPL_TIMESTAMP_LSB,
+ DPL_TIMESTAMP_SEL,
+ TAG_TIMESTAMP_LSB,
+ NAT_TIMESTAMP_LSB,
+};
-#define ENTER_IDLE_DEBOUNCE_THRESH_FMASK GENMASK(15, 0)
-#define CONST_NON_IDLE_ENABLE_FMASK GENMASK(16, 16)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_QTIME_TIMESTAMP_CFG_OFFSET 0x0000024c
-#define DPL_TIMESTAMP_LSB_FMASK GENMASK(4, 0)
-#define DPL_TIMESTAMP_SEL_FMASK GENMASK(7, 7)
-#define TAG_TIMESTAMP_LSB_FMASK GENMASK(12, 8)
-#define NAT_TIMESTAMP_LSB_FMASK GENMASK(20, 16)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_TIMERS_XO_CLK_DIV_CFG_OFFSET 0x00000250
-#define DIV_VALUE_FMASK GENMASK(8, 0)
-#define DIV_ENABLE_FMASK GENMASK(31, 31)
-
-/* The next register is present for IPA v4.5+ */
-#define IPA_REG_TIMERS_PULSE_GRAN_CFG_OFFSET 0x00000254
-#define GRAN_0_FMASK GENMASK(2, 0)
-#define GRAN_1_FMASK GENMASK(5, 3)
-#define GRAN_2_FMASK GENMASK(8, 6)
-/* Values for GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
+/* TIMERS_XO_CLK_DIV_CFG register */
+enum ipa_reg_timers_xo_clk_div_cfg_field_id {
+ DIV_VALUE,
+ DIV_ENABLE,
+};
+
+/* TIMERS_PULSE_GRAN_CFG register */
+enum ipa_reg_timers_pulse_gran_cfg_field_id {
+ PULSE_GRAN_0,
+ PULSE_GRAN_1,
+ PULSE_GRAN_2,
+};
+
+/* Values for PULSE_GRAN_x fields of TIMERS_PULSE_GRAN_CFG */
enum ipa_pulse_gran {
IPA_GRAN_10_US = 0x0,
IPA_GRAN_20_US = 0x1,
@@ -325,267 +347,160 @@ enum ipa_pulse_gran {
IPA_GRAN_655350_US = 0x7,
};
-/* Not all of the following are present (depends on IPA version) */
-#define IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000400 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000404 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000408 + 0x0020 * (rt))
-#define IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
- (0x0000040c + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000500 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000504 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(rt) \
- (0x00000508 + 0x0020 * (rt))
-#define IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(rt) \
- (0x0000050c + 0x0020 * (rt))
-/* The next four fields are used for all resource group registers */
-#define X_MIN_LIM_FMASK GENMASK(5, 0)
-#define X_MAX_LIM_FMASK GENMASK(13, 8)
-/* The next two fields are not always present (if resource count is odd) */
-#define Y_MIN_LIM_FMASK GENMASK(21, 16)
-#define Y_MAX_LIM_FMASK GENMASK(29, 24)
-
-#define IPA_REG_ENDP_INIT_CTRL_N_OFFSET(ep) \
- (0x00000800 + 0x0070 * (ep))
-/* Valid only for RX (IPA producer) endpoints (do not use for IPA v4.0+) */
-#define ENDP_SUSPEND_FMASK GENMASK(0, 0)
-/* Valid only for TX (IPA consumer) endpoints */
-#define ENDP_DELAY_FMASK GENMASK(1, 1)
-
-#define IPA_REG_ENDP_INIT_CFG_N_OFFSET(ep) \
- (0x00000808 + 0x0070 * (ep))
-#define FRAG_OFFLOAD_EN_FMASK GENMASK(0, 0)
-#define CS_OFFLOAD_EN_FMASK GENMASK(2, 1)
-#define CS_METADATA_HDR_OFFSET_FMASK GENMASK(6, 3)
-#define CS_GEN_QMB_MASTER_SEL_FMASK GENMASK(8, 8)
+/* {SRC,DST}_RSRC_GRP_{01,23,45,67}_RSRC_TYPE registers */
+enum ipa_reg_rsrc_grp_rsrc_type_field_id {
+ X_MIN_LIM,
+ X_MAX_LIM,
+ Y_MIN_LIM,
+ Y_MAX_LIM,
+};
+
+/* ENDP_INIT_CTRL register */
+enum ipa_reg_endp_init_ctrl_field_id {
+ ENDP_SUSPEND, /* Not v4.0+ */
+ ENDP_DELAY, /* Not v4.2+ */
+};
+
+/* ENDP_INIT_CFG register */
+enum ipa_reg_endp_init_cfg_field_id {
+ FRAG_OFFLOAD_EN,
+ CS_OFFLOAD_EN,
+ CS_METADATA_HDR_OFFSET,
+ CS_GEN_QMB_MASTER_SEL,
+};
/** enum ipa_cs_offload_en - ENDP_INIT_CFG register CS_OFFLOAD_EN field value */
enum ipa_cs_offload_en {
- IPA_CS_OFFLOAD_NONE = 0x0,
- IPA_CS_OFFLOAD_UL = 0x1, /* Before IPA v4.5 (TX) */
- IPA_CS_OFFLOAD_DL = 0x2, /* Before IPA v4.5 (RX) */
- IPA_CS_OFFLOAD_INLINE = 0x1, /* IPA v4.5 (TX and RX) */
+ IPA_CS_OFFLOAD_NONE = 0x0,
+ IPA_CS_OFFLOAD_UL /* TX */ = 0x1, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_DL /* RX */ = 0x2, /* Not IPA v4.5+ */
+ IPA_CS_OFFLOAD_INLINE /* TX and RX */ = 0x1, /* IPA v4.5+ */
};
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_NAT_N_OFFSET(ep) \
- (0x0000080c + 0x0070 * (ep))
-#define NAT_EN_FMASK GENMASK(1, 0)
+/* ENDP_INIT_NAT register */
+enum ipa_reg_endp_init_nat_field_id {
+ NAT_EN,
+};
/** enum ipa_nat_en - ENDP_INIT_NAT register NAT_EN field value */
enum ipa_nat_en {
- IPA_NAT_BYPASS = 0x0,
- IPA_NAT_SRC = 0x1,
- IPA_NAT_DST = 0x2,
-};
-
-#define IPA_REG_ENDP_INIT_HDR_N_OFFSET(ep) \
- (0x00000810 + 0x0070 * (ep))
-#define HDR_LEN_FMASK GENMASK(5, 0)
-#define HDR_OFST_METADATA_VALID_FMASK GENMASK(6, 6)
-#define HDR_OFST_METADATA_FMASK GENMASK(12, 7)
-#define HDR_ADDITIONAL_CONST_LEN_FMASK GENMASK(18, 13)
-#define HDR_OFST_PKT_SIZE_VALID_FMASK GENMASK(19, 19)
-#define HDR_OFST_PKT_SIZE_FMASK GENMASK(25, 20)
-/* The next field is not present for IPA v4.9+ */
-#define HDR_A5_MUX_FMASK GENMASK(26, 26)
-#define HDR_LEN_INC_DEAGG_HDR_FMASK GENMASK(27, 27)
-/* The next field is not present for IPA v4.5+ */
-#define HDR_METADATA_REG_VALID_FMASK GENMASK(28, 28)
-/* The next two fields are present for IPA v4.5+ */
-#define HDR_LEN_MSB_FMASK GENMASK(29, 28)
-#define HDR_OFST_METADATA_MSB_FMASK GENMASK(31, 30)
-
-/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
-static inline u32 ipa_header_size_encoded(enum ipa_version version,
- u32 header_size)
-{
- u32 size = header_size & field_mask(HDR_LEN_FMASK);
- u32 val;
-
- val = u32_encode_bits(size, HDR_LEN_FMASK);
- if (version < IPA_VERSION_4_5) {
- WARN_ON(header_size != size);
- return val;
- }
-
- /* IPA v4.5 adds a few more most-significant bits */
- size = header_size >> hweight32(HDR_LEN_FMASK);
- val |= u32_encode_bits(size, HDR_LEN_MSB_FMASK);
-
- return val;
-}
-
-/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
-static inline u32 ipa_metadata_offset_encoded(enum ipa_version version,
- u32 offset)
-{
- u32 off = offset & field_mask(HDR_OFST_METADATA_FMASK);
- u32 val;
-
- val = u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
- if (version < IPA_VERSION_4_5) {
- WARN_ON(offset != off);
- return val;
- }
+ IPA_NAT_BYPASS = 0x0,
+ IPA_NAT_SRC = 0x1,
+ IPA_NAT_DST = 0x2,
+};
- /* IPA v4.5 adds a few more most-significant bits */
- off = offset >> hweight32(HDR_OFST_METADATA_FMASK);
- val |= u32_encode_bits(off, HDR_OFST_METADATA_MSB_FMASK);
+/* ENDP_INIT_HDR register */
+enum ipa_reg_endp_init_hdr_field_id {
+ HDR_LEN,
+ HDR_OFST_METADATA_VALID,
+ HDR_OFST_METADATA,
+ HDR_ADDITIONAL_CONST_LEN,
+ HDR_OFST_PKT_SIZE_VALID,
+ HDR_OFST_PKT_SIZE,
+ HDR_A5_MUX, /* Not v4.9+ */
+ HDR_LEN_INC_DEAGG_HDR,
+ HDR_METADATA_REG_VALID, /* Not v4.5+ */
+ HDR_LEN_MSB, /* v4.5+ */
+ HDR_OFST_METADATA_MSB, /* v4.5+ */
+};
- return val;
-}
+/* ENDP_INIT_HDR_EXT register */
+enum ipa_reg_endp_init_hdr_ext_field_id {
+ HDR_ENDIANNESS,
+ HDR_TOTAL_LEN_OR_PAD_VALID,
+ HDR_TOTAL_LEN_OR_PAD,
+ HDR_PAYLOAD_LEN_INC_PADDING,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET,
+ HDR_PAD_TO_ALIGNMENT,
+ HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB, /* v4.5+ */
+ HDR_OFST_PKT_SIZE_MSB, /* v4.5+ */
+ HDR_ADDITIONAL_CONST_LEN_MSB, /* v4.5+ */
+};
-#define IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(ep) \
- (0x00000814 + 0x0070 * (ep))
-#define HDR_ENDIANNESS_FMASK GENMASK(0, 0)
-#define HDR_TOTAL_LEN_OR_PAD_VALID_FMASK GENMASK(1, 1)
-#define HDR_TOTAL_LEN_OR_PAD_FMASK GENMASK(2, 2)
-#define HDR_PAYLOAD_LEN_INC_PADDING_FMASK GENMASK(3, 3)
-#define HDR_TOTAL_LEN_OR_PAD_OFFSET_FMASK GENMASK(9, 4)
-#define HDR_PAD_TO_ALIGNMENT_FMASK GENMASK(13, 10)
-/* The next three fields are present for IPA v4.5+ */
-#define HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB_FMASK GENMASK(17, 16)
-#define HDR_OFST_PKT_SIZE_MSB_FMASK GENMASK(19, 18)
-#define HDR_ADDITIONAL_CONST_LEN_MSB_FMASK GENMASK(21, 20)
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(rxep) \
- (0x00000818 + 0x0070 * (rxep))
-
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_MODE_N_OFFSET(txep) \
- (0x00000820 + 0x0070 * (txep))
-#define MODE_FMASK GENMASK(2, 0)
-/* The next field is present for IPA v4.5+ */
-#define DCPH_ENABLE_FMASK GENMASK(3, 3)
-#define DEST_PIPE_INDEX_FMASK GENMASK(8, 4)
-#define BYTE_THRESHOLD_FMASK GENMASK(27, 12)
-#define PIPE_REPLICATION_EN_FMASK GENMASK(28, 28)
-#define PAD_EN_FMASK GENMASK(29, 29)
-/* The next field is not present for IPA v4.5+ */
-#define HDR_FTCH_DISABLE_FMASK GENMASK(30, 30)
-/* The next field is present for IPA v4.9+ */
-#define DRBIP_ACL_ENABLE GENMASK(30, 30)
+/* ENDP_INIT_MODE register */
+enum ipa_reg_endp_init_mode_field_id {
+ ENDP_MODE,
+ DCPH_ENABLE, /* v4.5+ */
+ DEST_PIPE_INDEX,
+ BYTE_THRESHOLD,
+ PIPE_REPLICATION_EN,
+ PAD_EN,
+	HDR_FTCH_DISABLE,			/* Not v4.5+ */
+ DRBIP_ACL_ENABLE, /* v4.9+ */
+};
/** enum ipa_mode - ENDP_INIT_MODE register MODE field value */
enum ipa_mode {
- IPA_BASIC = 0x0,
- IPA_ENABLE_FRAMING_HDLC = 0x1,
- IPA_ENABLE_DEFRAMING_HDLC = 0x2,
- IPA_DMA = 0x3,
+ IPA_BASIC = 0x0,
+ IPA_ENABLE_FRAMING_HDLC = 0x1,
+ IPA_ENABLE_DEFRAMING_HDLC = 0x2,
+ IPA_DMA = 0x3,
};
-#define IPA_REG_ENDP_INIT_AGGR_N_OFFSET(ep) \
- (0x00000824 + 0x0070 * (ep))
-#define AGGR_EN_FMASK GENMASK(1, 0)
-#define AGGR_TYPE_FMASK GENMASK(4, 2)
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_byte_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(9, 5) : GENMASK(10, 5);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_time_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(14, 10) : GENMASK(16, 12);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_pkt_limit_fmask(bool legacy)
-{
- return legacy ? GENMASK(20, 15) : GENMASK(22, 17);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_sw_eof_active_fmask(bool legacy)
-{
- return legacy ? GENMASK(21, 21) : GENMASK(23, 23);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_force_close_fmask(bool legacy)
-{
- return legacy ? GENMASK(22, 22) : GENMASK(24, 24);
-}
-
-/* The legacy value is used for IPA hardware before IPA v4.5 */
-static inline u32 aggr_hard_byte_limit_enable_fmask(bool legacy)
-{
- return legacy ? GENMASK(24, 24) : GENMASK(26, 26);
-}
-
-/* The next field is present for IPA v4.5+ */
-#define AGGR_GRAN_SEL_FMASK GENMASK(27, 27)
+/* ENDP_INIT_AGGR register */
+enum ipa_reg_endp_init_aggr_field_id {
+ AGGR_EN,
+ AGGR_TYPE,
+ BYTE_LIMIT,
+ TIME_LIMIT,
+ PKT_LIMIT,
+ SW_EOF_ACTIVE,
+ FORCE_CLOSE,
+ HARD_BYTE_LIMIT_EN,
+ AGGR_GRAN_SEL,
+};
/** enum ipa_aggr_en - ENDP_INIT_AGGR register AGGR_EN field value */
enum ipa_aggr_en {
- IPA_BYPASS_AGGR = 0x0, /* (TX, RX) */
- IPA_ENABLE_AGGR = 0x1, /* (RX) */
- IPA_ENABLE_DEAGGR = 0x2, /* (TX) */
+ IPA_BYPASS_AGGR /* TX and RX */ = 0x0,
+ IPA_ENABLE_AGGR /* RX */ = 0x1,
+ IPA_ENABLE_DEAGGR /* TX */ = 0x2,
};
/** enum ipa_aggr_type - ENDP_INIT_AGGR register AGGR_TYPE field value */
enum ipa_aggr_type {
- IPA_MBIM_16 = 0x0,
- IPA_HDLC = 0x1,
- IPA_TLP = 0x2,
- IPA_RNDIS = 0x3,
- IPA_GENERIC = 0x4,
- IPA_COALESCE = 0x5,
- IPA_QCMAP = 0x6,
-};
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(rxep) \
- (0x0000082c + 0x0070 * (rxep))
-#define HOL_BLOCK_EN_FMASK GENMASK(0, 0)
-
-/* Valid only for RX (IPA producer) endpoints */
-#define IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(rxep) \
- (0x00000830 + 0x0070 * (rxep))
-/* The next two fields are present for IPA v4.2 only */
-#define BASE_VALUE_FMASK GENMASK(4, 0)
-#define SCALE_FMASK GENMASK(12, 8)
-/* The next two fields are present for IPA v4.5 */
-#define TIME_LIMIT_FMASK GENMASK(4, 0)
-#define GRAN_SEL_FMASK GENMASK(8, 8)
-
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(txep) \
- (0x00000834 + 0x0070 * (txep))
-#define DEAGGR_HDR_LEN_FMASK GENMASK(5, 0)
-#define SYSPIPE_ERR_DETECTION_FMASK GENMASK(6, 6)
-#define PACKET_OFFSET_VALID_FMASK GENMASK(7, 7)
-#define PACKET_OFFSET_LOCATION_FMASK GENMASK(13, 8)
-#define IGNORE_MIN_PKT_ERR_FMASK GENMASK(14, 14)
-#define MAX_PACKET_LEN_FMASK GENMASK(31, 16)
-
-#define IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(ep) \
- (0x00000838 + 0x0070 * (ep))
-/* Encoded value for ENDP_INIT_RSRC_GRP register RSRC_GRP field */
-static inline u32 rsrc_grp_encoded(enum ipa_version version, u32 rsrc_grp)
-{
- if (version < IPA_VERSION_3_5 || version == IPA_VERSION_4_5)
- return u32_encode_bits(rsrc_grp, GENMASK(2, 0));
+ IPA_MBIM_16 = 0x0,
+ IPA_HDLC = 0x1,
+ IPA_TLP = 0x2,
+ IPA_RNDIS = 0x3,
+ IPA_GENERIC = 0x4,
+ IPA_COALESCE = 0x5,
+ IPA_QCMAP = 0x6,
+};
- if (version == IPA_VERSION_4_2 || version == IPA_VERSION_4_7)
- return u32_encode_bits(rsrc_grp, GENMASK(0, 0));
+/* ENDP_INIT_HOL_BLOCK_EN register */
+enum ipa_reg_endp_init_hol_block_en_field_id {
+ HOL_BLOCK_EN,
+};
- return u32_encode_bits(rsrc_grp, GENMASK(1, 0));
-}
+/* ENDP_INIT_HOL_BLOCK_TIMER register */
+enum ipa_reg_endp_init_hol_block_timer_field_id {
+ TIMER_BASE_VALUE, /* Not v4.5+ */
+ TIMER_SCALE, /* v4.2 only */
+ TIMER_LIMIT, /* v4.5+ */
+ TIMER_GRAN_SEL, /* v4.5+ */
+};
-/* Valid only for TX (IPA consumer) endpoints */
-#define IPA_REG_ENDP_INIT_SEQ_N_OFFSET(txep) \
- (0x0000083c + 0x0070 * (txep))
-#define SEQ_TYPE_FMASK GENMASK(7, 0)
-#define SEQ_REP_TYPE_FMASK GENMASK(15, 8)
+/* ENDP_INIT_DEAGGR register */
+enum ipa_reg_endp_deaggr_field_id {
+ DEAGGR_HDR_LEN,
+ SYSPIPE_ERR_DETECTION,
+ PACKET_OFFSET_VALID,
+ PACKET_OFFSET_LOCATION,
+ IGNORE_MIN_PKT_ERR,
+ MAX_PACKET_LEN,
+};
+
+/* ENDP_INIT_RSRC_GRP register */
+enum ipa_reg_endp_init_rsrc_grp_field_id {
+ ENDP_RSRC_GRP,
+};
+
+/* ENDP_INIT_SEQ register */
+enum ipa_reg_endp_init_seq_field_id {
+ SEQ_TYPE,
+ SEQ_REP_TYPE, /* Not v4.5+ */
+};
/**
* enum ipa_seq_type - HPS and DPS sequencer type
@@ -629,76 +544,36 @@ enum ipa_seq_rep_type {
IPA_SEQ_REP_DMA_PARSER = 0x08,
};
-#define IPA_REG_ENDP_STATUS_N_OFFSET(ep) \
- (0x00000840 + 0x0070 * (ep))
-#define STATUS_EN_FMASK GENMASK(0, 0)
-#define STATUS_ENDP_FMASK GENMASK(5, 1)
-/* The next field is not present for IPA v4.5+ */
-#define STATUS_LOCATION_FMASK GENMASK(8, 8)
-/* The next field is present for IPA v4.0+ */
-#define STATUS_PKT_SUPPRESS_FMASK GENMASK(9, 9)
-
-/* The next register is not present for IPA v4.2 (which no hashing support) */
-#define IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(er) \
- (0x0000085c + 0x0070 * (er))
-#define FILTER_HASH_MSK_SRC_ID_FMASK GENMASK(0, 0)
-#define FILTER_HASH_MSK_SRC_IP_FMASK GENMASK(1, 1)
-#define FILTER_HASH_MSK_DST_IP_FMASK GENMASK(2, 2)
-#define FILTER_HASH_MSK_SRC_PORT_FMASK GENMASK(3, 3)
-#define FILTER_HASH_MSK_DST_PORT_FMASK GENMASK(4, 4)
-#define FILTER_HASH_MSK_PROTOCOL_FMASK GENMASK(5, 5)
-#define FILTER_HASH_MSK_METADATA_FMASK GENMASK(6, 6)
-#define IPA_REG_ENDP_FILTER_HASH_MSK_ALL GENMASK(6, 0)
-
-#define ROUTER_HASH_MSK_SRC_ID_FMASK GENMASK(16, 16)
-#define ROUTER_HASH_MSK_SRC_IP_FMASK GENMASK(17, 17)
-#define ROUTER_HASH_MSK_DST_IP_FMASK GENMASK(18, 18)
-#define ROUTER_HASH_MSK_SRC_PORT_FMASK GENMASK(19, 19)
-#define ROUTER_HASH_MSK_DST_PORT_FMASK GENMASK(20, 20)
-#define ROUTER_HASH_MSK_PROTOCOL_FMASK GENMASK(21, 21)
-#define ROUTER_HASH_MSK_METADATA_FMASK GENMASK(22, 22)
-#define IPA_REG_ENDP_ROUTER_HASH_MSK_ALL GENMASK(22, 16)
-
-static inline u32 ipa_reg_irq_stts_ee_n_offset(enum ipa_version version,
- u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x00003008 + 0x1000 * ee;
-
- return 0x00004008 + 0x1000 * ee;
-}
-
-static inline u32 ipa_reg_irq_stts_offset(enum ipa_version version)
-{
- return ipa_reg_irq_stts_ee_n_offset(version, GSI_EE_AP);
-}
-
-static inline u32 ipa_reg_irq_en_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x0000300c + 0x1000 * ee;
-
- return 0x0000400c + 0x1000 * ee;
-}
-
-static inline u32 ipa_reg_irq_en_offset(enum ipa_version version)
-{
- return ipa_reg_irq_en_ee_n_offset(version, GSI_EE_AP);
-}
-
-static inline u32 ipa_reg_irq_clr_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x00003010 + 0x1000 * ee;
-
- return 0x00004010 + 0x1000 * ee;
-}
+/* ENDP_STATUS register */
+enum ipa_reg_endp_status_field_id {
+ STATUS_EN,
+ STATUS_ENDP,
+ STATUS_LOCATION, /* Not v4.5+ */
+ STATUS_PKT_SUPPRESS, /* v4.0+ */
+};
-static inline u32 ipa_reg_irq_clr_offset(enum ipa_version version)
-{
- return ipa_reg_irq_clr_ee_n_offset(version, GSI_EE_AP);
-}
+/* ENDP_FILTER_ROUTER_HSH_CFG register */
+enum ipa_reg_endp_filter_router_hsh_cfg_field_id {
+ FILTER_HASH_MSK_SRC_ID,
+ FILTER_HASH_MSK_SRC_IP,
+ FILTER_HASH_MSK_DST_IP,
+ FILTER_HASH_MSK_SRC_PORT,
+ FILTER_HASH_MSK_DST_PORT,
+ FILTER_HASH_MSK_PROTOCOL,
+ FILTER_HASH_MSK_METADATA,
+	FILTER_HASH_MSK_ALL,	/* Bitwise OR of the above 7 fields */
+
+ ROUTER_HASH_MSK_SRC_ID,
+ ROUTER_HASH_MSK_SRC_IP,
+ ROUTER_HASH_MSK_DST_IP,
+ ROUTER_HASH_MSK_SRC_PORT,
+ ROUTER_HASH_MSK_DST_PORT,
+ ROUTER_HASH_MSK_PROTOCOL,
+ ROUTER_HASH_MSK_METADATA,
+	ROUTER_HASH_MSK_ALL,	/* Bitwise OR of the above 7 fields */
+};
+/* IPA_IRQ_STTS, IPA_IRQ_EN, and IPA_IRQ_CLR registers */
/**
* enum ipa_irq_id - Bit positions representing type of IPA IRQ
* @IPA_IRQ_UC_0: Microcontroller event interrupt
@@ -774,74 +649,82 @@ enum ipa_irq_id {
IPA_IRQ_COUNT, /* Last; not an id */
};
-static inline u32 ipa_reg_irq_uc_ee_n_offset(enum ipa_version version, u32 ee)
-{
- if (version < IPA_VERSION_4_9)
- return 0x0000301c + 0x1000 * ee;
+/* IPA_IRQ_UC register */
+enum ipa_reg_ipa_irq_uc_field_id {
+ UC_INTR,
+};
- return 0x0000401c + 0x1000 * ee;
-}
+extern const struct ipa_regs ipa_regs_v3_1;
+extern const struct ipa_regs ipa_regs_v3_5_1;
+extern const struct ipa_regs ipa_regs_v4_2;
+extern const struct ipa_regs ipa_regs_v4_5;
+extern const struct ipa_regs ipa_regs_v4_9;
+extern const struct ipa_regs ipa_regs_v4_11;
-static inline u32 ipa_reg_irq_uc_offset(enum ipa_version version)
+/* Return the field mask for a field in a register */
+static inline u32 ipa_reg_fmask(const struct ipa_reg *reg, u32 field_id)
{
- return ipa_reg_irq_uc_ee_n_offset(version, GSI_EE_AP);
-}
+ if (!reg || WARN_ON(field_id >= reg->fcount))
+ return 0;
-#define UC_INTR_FMASK GENMASK(0, 0)
+ return reg->fmask[field_id];
+}
-/* ipa->available defines the valid bits in the SUSPEND_INFO register */
-static inline u32
-ipa_reg_irq_suspend_info_ee_n_offset(enum ipa_version version, u32 ee)
+/* Return the mask for a single-bit field in a register */
+static inline u32 ipa_reg_bit(const struct ipa_reg *reg, u32 field_id)
{
- if (version == IPA_VERSION_3_0)
- return 0x00003098 + 0x1000 * ee;
+ u32 fmask = ipa_reg_fmask(reg, field_id);
- if (version < IPA_VERSION_4_9)
- return 0x00003030 + 0x1000 * ee;
+ WARN_ON(!is_power_of_2(fmask));
- return 0x00004030 + 0x1000 * ee;
+ return fmask;
}
+/* Encode a value into the given field of a register */
static inline u32
-ipa_reg_irq_suspend_info_offset(enum ipa_version version)
+ipa_reg_encode(const struct ipa_reg *reg, u32 field_id, u32 val)
{
- return ipa_reg_irq_suspend_info_ee_n_offset(version, GSI_EE_AP);
-}
+ u32 fmask = ipa_reg_fmask(reg, field_id);
-/* ipa->available defines the valid bits in the SUSPEND_EN register */
-static inline u32
-ipa_reg_irq_suspend_en_ee_n_offset(enum ipa_version version, u32 ee)
-{
- WARN_ON(version == IPA_VERSION_3_0);
+ if (!fmask)
+ return 0;
- if (version < IPA_VERSION_4_9)
- return 0x00003034 + 0x1000 * ee;
+ val <<= __ffs(fmask);
+ if (WARN_ON(val & ~fmask))
+ return 0;
- return 0x00004034 + 0x1000 * ee;
+ return val;
}
+/* Given a register value, decode (extract) the value in the given field */
static inline u32
-ipa_reg_irq_suspend_en_offset(enum ipa_version version)
+ipa_reg_decode(const struct ipa_reg *reg, u32 field_id, u32 val)
{
- return ipa_reg_irq_suspend_en_ee_n_offset(version, GSI_EE_AP);
+ u32 fmask = ipa_reg_fmask(reg, field_id);
+
+ return fmask ? (val & fmask) >> __ffs(fmask) : 0;
}
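
As a worked example: for a field whose mask is GENMASK(13, 8) = 0x3f00 (like X_MAX_LIM in the resource group registers), ipa_reg_encode(reg, X_MAX_LIM, 5) yields 5 << 8 = 0x500, and ipa_reg_decode(reg, X_MAX_LIM, 0x504) recovers (0x504 & 0x3f00) >> 8 = 5.
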
-/* ipa->available defines the valid bits in the SUSPEND_CLR register */
-static inline u32
-ipa_reg_irq_suspend_clr_ee_n_offset(enum ipa_version version, u32 ee)
+/* Return the maximum value representable by the given field; always 2^n - 1 */
+static inline u32 ipa_reg_field_max(const struct ipa_reg *reg, u32 field_id)
{
- WARN_ON(version == IPA_VERSION_3_0);
+ u32 fmask = ipa_reg_fmask(reg, field_id);
- if (version < IPA_VERSION_4_9)
- return 0x00003038 + 0x1000 * ee;
+ return fmask ? fmask >> __ffs(fmask) : 0;
+}
+
+const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id);
- return 0x00004038 + 0x1000 * ee;
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_offset(const struct ipa_reg *reg)
+{
+ return reg ? reg->offset : 0;
}
-static inline u32
-ipa_reg_irq_suspend_clr_offset(enum ipa_version version)
+/* Returns 0 for NULL reg; warning will have already been issued */
+static inline u32 ipa_reg_n_offset(const struct ipa_reg *reg, u32 n)
{
- return ipa_reg_irq_suspend_clr_ee_n_offset(version, GSI_EE_AP);
+ return reg ? reg->offset + n * reg->stride : 0;
}
int ipa_reg_init(struct ipa *ipa);
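
Writing a parameterized register combines these helpers the same way throughout the rest of the patch; a minimal sketch (the function itself is illustrative only):

static void ipa_endpoint_nat_bypass(struct ipa *ipa, u32 endpoint_id)
{
	const struct ipa_reg *reg = ipa_reg(ipa, ENDP_INIT_NAT);
	u32 val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);

	/* The endpoint ID selects the register instance via its stride */
	iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
}
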
diff --git a/drivers/net/ipa/ipa_resource.c b/drivers/net/ipa/ipa_resource.c
index 06cec7199382..a257f0e5e361 100644
--- a/drivers/net/ipa/ipa_resource.c
+++ b/drivers/net/ipa/ipa_resource.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -69,20 +69,21 @@ static bool ipa_resource_limits_valid(struct ipa *ipa,
}
static void
-ipa_resource_config_common(struct ipa *ipa, u32 offset,
+ipa_resource_config_common(struct ipa *ipa, u32 resource_type,
+ const struct ipa_reg *reg,
const struct ipa_resource_limits *xlimits,
const struct ipa_resource_limits *ylimits)
{
u32 val;
- val = u32_encode_bits(xlimits->min, X_MIN_LIM_FMASK);
- val |= u32_encode_bits(xlimits->max, X_MAX_LIM_FMASK);
+ val = ipa_reg_encode(reg, X_MIN_LIM, xlimits->min);
+ val |= ipa_reg_encode(reg, X_MAX_LIM, xlimits->max);
if (ylimits) {
- val |= u32_encode_bits(ylimits->min, Y_MIN_LIM_FMASK);
- val |= u32_encode_bits(ylimits->max, Y_MAX_LIM_FMASK);
+ val |= ipa_reg_encode(reg, Y_MIN_LIM, ylimits->min);
+ val |= ipa_reg_encode(reg, Y_MAX_LIM, ylimits->max);
}
- iowrite32(val, ipa->reg_virt + offset);
+ iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, resource_type));
}
static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
@@ -91,34 +92,35 @@ static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_src_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- u32 offset;
+ const struct ipa_reg *reg;
resource = &data->resource_src[resource_type];
- offset = IPA_REG_SRC_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_SRC_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_SRC_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_SRC_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, SRC_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
- ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
}
static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
@@ -127,34 +129,35 @@ static void ipa_resource_config_dst(struct ipa *ipa, u32 resource_type,
u32 group_count = data->rsrc_group_dst_count;
const struct ipa_resource_limits *ylimits;
const struct ipa_resource *resource;
- u32 offset;
+ const struct ipa_reg *reg;
resource = &data->resource_dst[resource_type];
- offset = IPA_REG_DST_RSRC_GRP_01_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_01_RSRC_TYPE);
ylimits = group_count == 1 ? NULL : &resource->limits[1];
- ipa_resource_config_common(ipa, offset, &resource->limits[0], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[0], ylimits);
if (group_count < 3)
return;
- offset = IPA_REG_DST_RSRC_GRP_23_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_23_RSRC_TYPE);
ylimits = group_count == 3 ? NULL : &resource->limits[3];
- ipa_resource_config_common(ipa, offset, &resource->limits[2], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[2], ylimits);
if (group_count < 5)
return;
- offset = IPA_REG_DST_RSRC_GRP_45_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_45_RSRC_TYPE);
ylimits = group_count == 5 ? NULL : &resource->limits[5];
- ipa_resource_config_common(ipa, offset, &resource->limits[4], ylimits);
-
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[4], ylimits);
if (group_count < 7)
return;
- offset = IPA_REG_DST_RSRC_GRP_67_RSRC_TYPE_N_OFFSET(resource_type);
+ reg = ipa_reg(ipa, DST_RSRC_GRP_67_RSRC_TYPE);
ylimits = group_count == 7 ? NULL : &resource->limits[7];
- ipa_resource_config_common(ipa, offset, &resource->limits[6], ylimits);
+ ipa_resource_config_common(ipa, resource_type, reg,
+ &resource->limits[6], ylimits);
}
/* Configure resources; there is no ipa_resource_deconfig() */
diff --git a/drivers/net/ipa/ipa_smp2p.c b/drivers/net/ipa/ipa_smp2p.c
index 211233612039..5620dc271fac 100644
--- a/drivers/net/ipa/ipa_smp2p.c
+++ b/drivers/net/ipa/ipa_smp2p.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#include <linux/types.h>
diff --git a/drivers/net/ipa/ipa_smp2p.h b/drivers/net/ipa/ipa_smp2p.h
index 59cee31a7383..9b969b03d1a4 100644
--- a/drivers/net/ipa/ipa_smp2p.h
+++ b/drivers/net/ipa/ipa_smp2p.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_SMP2P_H_
#define _IPA_SMP2P_H_
diff --git a/drivers/net/ipa/ipa_sysfs.c b/drivers/net/ipa/ipa_sysfs.c
index ff61dbdd70d8..5cbc15a971f9 100644
--- a/drivers/net/ipa/ipa_sysfs.c
+++ b/drivers/net/ipa/ipa_sysfs.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-/* Copyright (C) 2021 Linaro Ltd. */
+/* Copyright (C) 2021-2022 Linaro Ltd. */
#include <linux/kernel.h>
#include <linux/types.h>
@@ -96,38 +96,71 @@ const struct attribute_group ipa_feature_attribute_group = {
.attrs = ipa_feature_attrs,
};
-static ssize_t
-ipa_endpoint_id_show(struct ipa *ipa, char *buf, enum ipa_endpoint_name name)
+static umode_t ipa_endpoint_id_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
{
- u32 endpoint_id = ipa->name_map[name]->endpoint_id;
+ struct ipa *ipa = dev_get_drvdata(kobj_to_dev(kobj));
+ struct device_attribute *dev_attr;
+ struct dev_ext_attribute *ea;
+ bool visible;
+
+ /* An endpoint id attribute is only visible if it's defined */
+ dev_attr = container_of(attr, struct device_attribute, attr);
+ ea = container_of(dev_attr, struct dev_ext_attribute, attr);
- return scnprintf(buf, PAGE_SIZE, "%u\n", endpoint_id);
+ visible = !!ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
+
+ return visible ? attr->mode : 0;
}
-static ssize_t rx_endpoint_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t endpoint_id_attr_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
struct ipa *ipa = dev_get_drvdata(dev);
+ struct ipa_endpoint *endpoint;
+ struct dev_ext_attribute *ea;
+
+ ea = container_of(attr, struct dev_ext_attribute, attr);
+ endpoint = ipa->name_map[(enum ipa_endpoint_name)(uintptr_t)ea->var];
- return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_RX);
+ return sysfs_emit(buf, "%u\n", endpoint->endpoint_id);
}
-static DEVICE_ATTR_RO(rx_endpoint_id);
+#define ENDPOINT_ID_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_endpoint_id_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
-static ssize_t tx_endpoint_id_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct ipa *ipa = dev_get_drvdata(dev);
+ENDPOINT_ID_ATTR(modem_rx, IPA_ENDPOINT_AP_MODEM_RX);
+ENDPOINT_ID_ATTR(modem_tx, IPA_ENDPOINT_AP_MODEM_TX);
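
For example, the first of these invocations expands to:

static struct dev_ext_attribute dev_attr_endpoint_id_modem_rx = {
	.attr	= __ATTR(modem_rx, 0444, endpoint_id_attr_show, NULL),
	.var	= (void *)(IPA_ENDPOINT_AP_MODEM_RX),
};
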
- return ipa_endpoint_id_show(ipa, buf, IPA_ENDPOINT_AP_MODEM_TX);
-}
+static struct attribute *ipa_endpoint_id_attrs[] = {
+ &dev_attr_endpoint_id_modem_rx.attr.attr,
+ &dev_attr_endpoint_id_modem_tx.attr.attr,
+ NULL
+};
-static DEVICE_ATTR_RO(tx_endpoint_id);
+const struct attribute_group ipa_endpoint_id_attribute_group = {
+ .name = "endpoint_id",
+ .is_visible = ipa_endpoint_id_is_visible,
+ .attrs = ipa_endpoint_id_attrs,
+};
+
+/* Reuse endpoint ID attributes for the legacy modem endpoint IDs */
+#define MODEM_ATTR(_n, _endpoint_name) \
+ static struct dev_ext_attribute dev_attr_modem_ ## _n = { \
+ .attr = __ATTR(_n, 0444, endpoint_id_attr_show, NULL), \
+ .var = (void *)(_endpoint_name), \
+ }
+
+MODEM_ATTR(rx_endpoint_id, IPA_ENDPOINT_AP_MODEM_RX);
+MODEM_ATTR(tx_endpoint_id, IPA_ENDPOINT_AP_MODEM_TX);
static struct attribute *ipa_modem_attrs[] = {
- &dev_attr_rx_endpoint_id.attr,
- &dev_attr_tx_endpoint_id.attr,
- NULL
+ &dev_attr_modem_rx_endpoint_id.attr.attr,
+ &dev_attr_modem_tx_endpoint_id.attr.attr,
+ NULL,
};
const struct attribute_group ipa_modem_attribute_group = {
diff --git a/drivers/net/ipa/ipa_sysfs.h b/drivers/net/ipa/ipa_sysfs.h
index b34e5650bf8c..58ba22810bab 100644
--- a/drivers/net/ipa/ipa_sysfs.h
+++ b/drivers/net/ipa/ipa_sysfs.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_SYSFS_H_
#define _IPA_SYSFS_H_
@@ -10,6 +10,7 @@ struct attribute_group;
extern const struct attribute_group ipa_attribute_group;
extern const struct attribute_group ipa_feature_attribute_group;
+extern const struct attribute_group ipa_endpoint_id_attribute_group;
extern const struct attribute_group ipa_modem_attribute_group;
#endif /* _IPA_SYSFS_H_ */
diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c
index 1da334f54944..510ff2dc8999 100644
--- a/drivers/net/ipa/ipa_table.c
+++ b/drivers/net/ipa/ipa_table.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2021 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -108,8 +108,6 @@
/* Assignment of route table entries to the modem and AP */
#define IPA_ROUTE_MODEM_MIN 0
-#define IPA_ROUTE_MODEM_COUNT 8
-
#define IPA_ROUTE_AP_MIN IPA_ROUTE_MODEM_COUNT
#define IPA_ROUTE_AP_COUNT \
(IPA_ROUTE_COUNT_MAX - IPA_ROUTE_MODEM_COUNT)
@@ -386,8 +384,9 @@ void ipa_table_reset(struct ipa *ipa, bool modem)
int ipa_table_hash_flush(struct ipa *ipa)
{
- u32 offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
+ const struct ipa_reg *reg;
struct gsi_trans *trans;
+ u32 offset;
u32 val;
if (!ipa_table_hash_support(ipa))
@@ -399,8 +398,13 @@ int ipa_table_hash_flush(struct ipa *ipa)
return -EBUSY;
}
- val = IPV4_FILTER_HASH_FMASK | IPV6_FILTER_HASH_FMASK;
- val |= IPV6_ROUTER_HASH_FMASK | IPV4_ROUTER_HASH_FMASK;
+ reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH);
+ offset = ipa_reg_offset(reg);
+
+ val = ipa_reg_bit(reg, IPV6_ROUTER_HASH);
+ val |= ipa_reg_bit(reg, IPV6_FILTER_HASH);
+ val |= ipa_reg_bit(reg, IPV4_ROUTER_HASH);
+ val |= ipa_reg_bit(reg, IPV4_FILTER_HASH);
ipa_cmd_register_write_add(trans, offset, val, val, false);
@@ -419,21 +423,26 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
dma_addr_t hash_addr;
dma_addr_t addr;
+ u32 zero_offset;
u16 hash_count;
+ u32 zero_size;
u16 hash_size;
u16 count;
u16 size;
- /* The number of filtering endpoints determines number of entries
- * in the filter table. The hashed and non-hashed filter table
- * will have the same number of entries. The size of the route
- * table region determines the number of entries it has.
- */
+ /* Compute the number of table entries to initialize */
if (filter) {
- /* Include one extra "slot" to hold the filter map itself */
+		/* The number of filtering endpoints determines the number
+		 * of entries in the filter table; we also add one more "slot"
+ * to hold the bitmap itself. The size of the hashed filter
+ * table is either the same as the non-hashed one, or zero.
+ */
count = 1 + hweight32(ipa->filter_map);
hash_count = hash_mem->size ? count : 0;
} else {
+ /* The size of a route table region determines the number
+ * of entries it has.
+ */
count = mem->size / sizeof(__le64);
hash_count = hash_mem->size / sizeof(__le64);
}
@@ -445,13 +454,42 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
hash_size, hash_mem->offset, hash_addr);
+ if (!filter)
+ return;
+
+ /* Zero the unused space in the filter table */
+ zero_offset = mem->offset + size;
+ zero_size = mem->size - size;
+ ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+ ipa->zero_addr, true);
+ if (!hash_size)
+ return;
+
+ /* Zero the unused space in the hashed filter table */
+ zero_offset = hash_mem->offset + hash_size;
+ zero_size = hash_mem->size - hash_size;
+ ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+ ipa->zero_addr, true);
}
int ipa_table_setup(struct ipa *ipa)
{
struct gsi_trans *trans;
- trans = ipa_cmd_trans_alloc(ipa, 4);
+ /* We will need at most 8 TREs:
+ * - IPv4:
+ * - One for route table initialization (non-hashed and hashed)
+ * - One for filter table initialization (non-hashed and hashed)
+ * - One to zero unused entries in the non-hashed filter table
+ * - One to zero unused entries in the hashed filter table
+ * - IPv6:
+ * - One for route table initialization (non-hashed and hashed)
+ * - One for filter table initialization (non-hashed and hashed)
+ * - One to zero unused entries in the non-hashed filter table
+ * - One to zero unused entries in the hashed filter table
+ * All platforms support at least 8 TREs in a transaction.
+ */
+ trans = ipa_cmd_trans_alloc(ipa, 8);
if (!trans) {
dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
return -EBUSY;
@@ -484,15 +522,18 @@ int ipa_table_setup(struct ipa *ipa)
static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint)
{
u32 endpoint_id = endpoint->endpoint_id;
+ struct ipa *ipa = endpoint->ipa;
+ const struct ipa_reg *reg;
u32 offset;
u32 val;
- offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(endpoint_id);
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, endpoint_id);
val = ioread32(endpoint->ipa->reg_virt + offset);
/* Zero all filter-related fields, preserving the rest */
- u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL);
+ val &= ~ipa_reg_fmask(reg, FILTER_HASH_MSK_ALL);
iowrite32(val, endpoint->ipa->reg_virt + offset);
}
@@ -533,13 +574,17 @@ static bool ipa_route_id_modem(u32 route_id)
*/
static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id)
{
- u32 offset = IPA_REG_ENDP_FILTER_ROUTER_HSH_CFG_N_OFFSET(route_id);
+ const struct ipa_reg *reg;
+ u32 offset;
u32 val;
+ reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG);
+ offset = ipa_reg_n_offset(reg, route_id);
+
val = ioread32(ipa->reg_virt + offset);
/* Zero all route-related fields, preserving the rest */
- u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL);
+ val &= ~ipa_reg_fmask(reg, ROUTER_HASH_MSK_ALL);
iowrite32(val, ipa->reg_virt + offset);
}
diff --git a/drivers/net/ipa/ipa_table.h b/drivers/net/ipa/ipa_table.h
index b6a9a0d79d68..395189f75d78 100644
--- a/drivers/net/ipa/ipa_table.h
+++ b/drivers/net/ipa/ipa_table.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2021 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_TABLE_H_
#define _IPA_TABLE_H_
@@ -13,6 +13,9 @@ struct ipa;
/* The maximum number of filter table entries (IPv4, IPv6; hashed or not) */
#define IPA_FILTER_COUNT_MAX 14
+/* The number of route table entries allotted to the modem */
+#define IPA_ROUTE_MODEM_COUNT 8
+
/* The maximum number of route table entries (IPv4, IPv6; hashed or not) */
#define IPA_ROUTE_COUNT_MAX 15
diff --git a/drivers/net/ipa/ipa_uc.c b/drivers/net/ipa/ipa_uc.c
index 856e55a080a7..f0ee47281015 100644
--- a/drivers/net/ipa/ipa_uc.c
+++ b/drivers/net/ipa/ipa_uc.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2018-2020 Linaro Ltd.
+ * Copyright (C) 2018-2022 Linaro Ltd.
*/
#include <linux/types.h>
@@ -11,6 +11,7 @@
#include "ipa.h"
#include "ipa_uc.h"
+#include "ipa_power.h"
/**
* DOC: The IPA embedded microcontroller
@@ -154,6 +155,7 @@ static void ipa_uc_response_hdlr(struct ipa *ipa, enum ipa_irq_id irq_id)
case IPA_UC_RESPONSE_INIT_COMPLETED:
if (ipa->uc_powered) {
ipa->uc_loaded = true;
+ ipa_power_retention(ipa, true);
pm_runtime_mark_last_busy(dev);
(void)pm_runtime_put_autosuspend(dev);
ipa->uc_powered = false;
@@ -184,6 +186,9 @@ void ipa_uc_deconfig(struct ipa *ipa)
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_1);
ipa_interrupt_remove(ipa->interrupt, IPA_IRQ_UC_0);
+ if (ipa->uc_loaded)
+ ipa_power_retention(ipa, false);
+
if (!ipa->uc_powered)
return;
@@ -217,7 +222,7 @@ void ipa_uc_power(struct ipa *ipa)
static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
{
struct ipa_uc_mem_area *shared = ipa_uc_shared(ipa);
- u32 offset;
+ const struct ipa_reg *reg;
u32 val;
/* Fill in the command data */
@@ -228,9 +233,10 @@ static void send_uc_command(struct ipa *ipa, u32 command, u32 command_param)
shared->response_param = 0;
/* Use an interrupt to tell the microcontroller the command is ready */
- val = u32_encode_bits(1, UC_INTR_FMASK);
- offset = ipa_reg_irq_uc_offset(ipa->version);
- iowrite32(val, ipa->reg_virt + offset);
+ reg = ipa_reg(ipa, IPA_IRQ_UC);
+ val = ipa_reg_bit(reg, UC_INTR);
+
+ iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
}
/* Tell the microcontroller the AP is shutting down */
diff --git a/drivers/net/ipa/ipa_uc.h b/drivers/net/ipa/ipa_uc.h
index 23847f934d64..8514096e6f36 100644
--- a/drivers/net/ipa/ipa_uc.h
+++ b/drivers/net/ipa/ipa_uc.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_UC_H_
#define _IPA_UC_H_
diff --git a/drivers/net/ipa/ipa_version.h b/drivers/net/ipa/ipa_version.h
index 6c16c895d842..7870e0cc3d7c 100644
--- a/drivers/net/ipa/ipa_version.h
+++ b/drivers/net/ipa/ipa_version.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
- * Copyright (C) 2019-2020 Linaro Ltd.
+ * Copyright (C) 2019-2022 Linaro Ltd.
*/
#ifndef _IPA_VERSION_H_
#define _IPA_VERSION_H_
@@ -19,10 +19,10 @@
* @IPA_VERSION_4_7: IPA version 4.7/GSI version 2.7
* @IPA_VERSION_4_9: IPA version 4.9/GSI version 2.9
* @IPA_VERSION_4_11: IPA version 4.11/GSI version 2.11 (2.1.1)
+ * @IPA_VERSION_COUNT: Number of defined IPA versions
*
* Defines the version of IPA (and GSI) hardware present on the platform.
- * Please update ipa_version_valid() and ipa_version_string() whenever a
- * new version is added.
+ * Please update ipa_version_string() whenever a new version is added.
*/
enum ipa_version {
IPA_VERSION_3_0,
@@ -36,6 +36,30 @@ enum ipa_version {
IPA_VERSION_4_7,
IPA_VERSION_4_9,
IPA_VERSION_4_11,
+ IPA_VERSION_COUNT, /* Last; not a version */
+};
+
+static inline bool ipa_version_supported(enum ipa_version version)
+{
+ switch (version) {
+ case IPA_VERSION_3_1:
+ case IPA_VERSION_3_5_1:
+ case IPA_VERSION_4_2:
+ case IPA_VERSION_4_5:
+ case IPA_VERSION_4_9:
+ case IPA_VERSION_4_11:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/* Execution environment IDs */
+enum gsi_ee_id {
+ GSI_EE_AP = 0x0,
+ GSI_EE_MODEM = 0x1,
+ GSI_EE_UC = 0x2,
+ GSI_EE_TZ = 0x3,
};
#endif /* _IPA_VERSION_H_ */
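
A short usage sketch for ipa_version_supported(); the wrapper function and its arguments are assumptions, but the check itself is what the helper exists for:

/* Hypothetical probe-time check (wrapper name and context assumed) */
static int ipa_check_hw(struct device *dev, const struct ipa_data *data)
{
	/* Reject hardware for which this driver has no register data */
	if (!ipa_version_supported(data->version)) {
		dev_err(dev, "unsupported IPA version %u\n", data->version);
		return -EINVAL;
	}

	return 0;
}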
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c
new file mode 100644
index 000000000000..0d002c3c38a2
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c
@@ -0,0 +1,446 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ [EOT_COAL_GRANULARITY] = GENMASK(3, 0),
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type,
+ 0x0000040c, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(7, 0),
+ [X_MAX_LIM] = GENMASK(15, 8),
+ [Y_MIN_LIM] = GENMASK(23, 16),
+ [Y_MAX_LIM] = GENMASK(31, 24),
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type,
+ 0x0000050c, 0x0020);
+
+static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
+ [SRC_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_67_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_67_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_67_rsrc_type,
+ [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v3_1 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
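
A sketch of how a register lookup resolves against a per-version table like ipa_regs_v3_1 above, assuming ipa->regs has been pointed at the structure matching the probed hardware; the enum name and the validation performed by the real lookup in ipa_reg.c are assumptions here:

/* Simplified sketch of the lookup; the in-tree version also validates reg_id */
const struct ipa_reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id)
{
	if (WARN_ON(reg_id >= ipa->regs->reg_count))
		return NULL;

	return ipa->regs->reg[reg_id];
}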
diff --git a/drivers/net/ipa/reg/ipa_reg-v3.5.1.c b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
new file mode 100644
index 000000000000..6e2f939b18f1
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v3.5.1.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [COMP_CFG_ENABLE] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x000008c);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000090);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x0000010c);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ [TX0_PREFETCH_DISABLE] = BIT(0),
+ [TX1_PREFETCH_DISABLE] = BIT(1),
+ [PREFETCH_ALMOST_EMPTY_SIZE] = GENMASK(4, 2),
+ /* Bits 5-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000220);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_ctrl_fmask[] = {
+ [ENDP_SUSPEND] = BIT(0),
+ [ENDP_DELAY] = BIT(1),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CTRL, endp_init_ctrl, 0x00000800, 0x0070);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+/* Entire register is a tick count */
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(31, 0),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CTRL] = &ipa_reg_endp_init_ctrl,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v3_5_1 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
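
The stride argument to IPA_REG_STRIDE_FIELDS() captures how the per-endpoint registers above repeat: the copy for endpoint N sits N * stride bytes past the base offset. A minimal sketch of the offset computation, assuming the struct ipa_reg members are named as shown:

/* Offset of the instance of a register used for endpoint (or other) "n" */
static inline u32 ipa_reg_n_offset(const struct ipa_reg *reg, u32 n)
{
	return reg->offset + n * reg->stride;
}

So, for example, ENDP_INIT_CFG for endpoint 3 would resolve to 0x00000808 + 3 * 0x0070.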
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.11.c b/drivers/net/ipa/reg/ipa_reg-v4.11.c
new file mode 100644
index 000000000000..8fd36569bb9f
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.11.c
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
+ /* Bit 18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(23, 22),
+ /* Bits 24-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_11 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
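
The fmask arrays above drive field encoding as well as lookup. A sketch of the encode side, built on the standard u32_encode_bits() helper from <linux/bitfield.h>; the fmask[] member access is an assumption about the struct ipa_reg layout:

#include <linux/bitfield.h>

/* Encode a value into a field's bit position (sketch of ipa_reg_encode()) */
static inline u32 ipa_reg_encode(const struct ipa_reg *reg, u32 field_id,
				 u32 val)
{
	u32 fmask = reg->fmask[field_id];	/* e.g. GENMASK(5, 2) */

	return u32_encode_bits(val, fmask);
}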
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.2.c b/drivers/net/ipa/reg/ipa_reg-v4.2.c
new file mode 100644
index 000000000000..f8e78e1907c8
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.2.c
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ [IPA_DCMP_FAST_CLK_EN] = BIT(4),
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ /* Bit 17 reserved */
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x0000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+IPA_REG(IPA_BCR, ipa_bcr, 0x000001d0);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(16, 0),
+ /* Bits 17-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_counter_cfg_fmask[] = {
+ /* Bits 0-3 reserved */
+ [AGGR_GRANULARITY] = GENMASK(8, 4),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ /* Bit 17 reserved */
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ [SSPND_PA_NO_BQ_STATE] = BIT(19),
+ /* Bits 20-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_METADATA_REG_VALID] = BIT(28),
+ /* Bits 29-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ /* Bit 3 reserved */
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [HDR_FTCH_DISABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(9, 5),
+ [TIME_LIMIT] = GENMASK(14, 10),
+ [PKT_LIMIT] = GENMASK(20, 15),
+ [SW_EOF_ACTIVE] = BIT(21),
+ [FORCE_CLOSE] = BIT(22),
+ /* Bit 23 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_BASE_VALUE] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_SCALE] = GENMASK(12, 8),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ [SEQ_REP_TYPE] = GENMASK(15, 8),
+ /* Bits 16-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-7 reserved */
+ [STATUS_LOCATION] = BIT(8),
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
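+
+/* The IRQ registers below are banked by GSI execution environment
+ * (EE); adding 0x1000 * GSI_EE_AP to each base offset selects the
+ * AP's copy of the register.
+ */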
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
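+
+/* Indexed by enum ipa_reg_id; entries for IDs not supported by
+ * IPA v4.2 are simply omitted and remain null pointers.
+ */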
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [IPA_BCR] = &ipa_reg_ipa_bcr,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [COUNTER_CFG] = &ipa_reg_counter_cfg,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_2 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.5.c b/drivers/net/ipa/reg/ipa_reg-v4.5.c
new file mode 100644
index 000000000000..d32b805abb11
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.5.c
@@ -0,0 +1,533 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
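+/* Each *_fmask[] array below maps a field ID to the register bits
+ * holding that field, expressed with the kernel's BIT()/GENMASK()
+ * helpers; bits not covered by any field are annotated as reserved
+ * so the full 32-bit layout is accounted for.
+ */
+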
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ /* Bit 0 reserved */
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(20, 17),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(21),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x00000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ /* Bits 18-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
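+
+/* IPA v4.5 has no COUNTER_CFG register; timestamp and timer
+ * configuration moves to the Qtime registers that follow.
+ */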
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
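+
+/* v4.5 provides resource limit registers for group pairs 0/1, 2/3
+ * and 4/5 in both directions, where v4.2 implements only the first
+ * two pairs.
+ */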
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type,
+ 0x00000408, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type,
+ 0x00000508, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ [HDR_A5_MUX] = BIT(26),
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(2, 0),
+ /* Bits 3-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
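+
+/* Which packet fields contribute to the filter and router hash is
+ * selectable per endpoint.  IPA v4.2 has no equivalent register,
+ * since hashed filter/route tables are not supported there.
+ */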
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00003008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000300c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00003010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000301c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00003030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00003034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00003038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [SRC_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_45_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_45_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_45_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_5 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};
diff --git a/drivers/net/ipa/reg/ipa_reg-v4.9.c b/drivers/net/ipa/reg/ipa_reg-v4.9.c
new file mode 100644
index 000000000000..eabbc5451937
--- /dev/null
+++ b/drivers/net/ipa/reg/ipa_reg-v4.9.c
@@ -0,0 +1,509 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* Copyright (C) 2022 Linaro Ltd. */
+
+#include <linux/types.h>
+
+#include "../ipa.h"
+#include "../ipa_reg.h"
+
+static const u32 ipa_reg_comp_cfg_fmask[] = {
+ [RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS] = BIT(0),
+ [GSI_SNOC_BYPASS_DIS] = BIT(1),
+ [GEN_QMB_0_SNOC_BYPASS_DIS] = BIT(2),
+ [GEN_QMB_1_SNOC_BYPASS_DIS] = BIT(3),
+ /* Bit 4 reserved */
+ [IPA_QMB_SELECT_CONS_EN] = BIT(5),
+ [IPA_QMB_SELECT_PROD_EN] = BIT(6),
+ [GSI_MULTI_INORDER_RD_DIS] = BIT(7),
+ [GSI_MULTI_INORDER_WR_DIS] = BIT(8),
+ [GEN_QMB_0_MULTI_INORDER_RD_DIS] = BIT(9),
+ [GEN_QMB_1_MULTI_INORDER_RD_DIS] = BIT(10),
+ [GEN_QMB_0_MULTI_INORDER_WR_DIS] = BIT(11),
+ [GEN_QMB_1_MULTI_INORDER_WR_DIS] = BIT(12),
+ [GEN_QMB_0_SNOC_CNOC_LOOP_PROT_DIS] = BIT(13),
+ [GSI_SNOC_CNOC_LOOP_PROT_DISABLE] = BIT(14),
+ [GSI_MULTI_AXI_MASTERS_DIS] = BIT(15),
+ [IPA_QMB_SELECT_GLOBAL_EN] = BIT(16),
+ [FULL_FLUSH_WAIT_RS_CLOSURE_EN] = BIT(17),
+ /* Bit 18 reserved */
+ [QMB_RAM_RD_CACHE_DISABLE] = BIT(19),
+ [GENQMB_AOOOWR] = BIT(20),
+ [IF_OUT_OF_BUF_STOP_RESET_MASK_EN] = BIT(21),
+ [ATOMIC_FETCHER_ARB_LOCK_DIS] = GENMASK(24, 22),
+ /* Bits 25-29 reserved */
+ [GEN_QMB_1_DYNAMIC_ASIZE] = BIT(30),
+ [GEN_QMB_0_DYNAMIC_ASIZE] = BIT(31),
+};
+
+IPA_REG_FIELDS(COMP_CFG, comp_cfg, 0x0000003c);
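+
+/* Relative to v4.5: bit 0 gains RAM_ARB_PRI_CLIENT_SAMP_FIX_DIS, the
+ * fields above bit 16 are rearranged, and ATOMIC_FETCHER_ARB_LOCK_DIS
+ * narrows from four bits to three.
+ */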
+
+static const u32 ipa_reg_clkon_cfg_fmask[] = {
+ [CLKON_RX] = BIT(0),
+ [CLKON_PROC] = BIT(1),
+ [TX_WRAPPER] = BIT(2),
+ [CLKON_MISC] = BIT(3),
+ [RAM_ARB] = BIT(4),
+ [FTCH_HPS] = BIT(5),
+ [FTCH_DPS] = BIT(6),
+ [CLKON_HPS] = BIT(7),
+ [CLKON_DPS] = BIT(8),
+ [RX_HPS_CMDQS] = BIT(9),
+ [HPS_DPS_CMDQS] = BIT(10),
+ [DPS_TX_CMDQS] = BIT(11),
+ [RSRC_MNGR] = BIT(12),
+ [CTX_HANDLER] = BIT(13),
+ [ACK_MNGR] = BIT(14),
+ [D_DCPH] = BIT(15),
+ [H_DCPH] = BIT(16),
+ [CLKON_DCMP] = BIT(17),
+ [NTF_TX_CMDQS] = BIT(18),
+ [CLKON_TX_0] = BIT(19),
+ [CLKON_TX_1] = BIT(20),
+ [CLKON_FNR] = BIT(21),
+ [QSB2AXI_CMDQ_L] = BIT(22),
+ [AGGR_WRAPPER] = BIT(23),
+ [RAM_SLAVEWAY] = BIT(24),
+ [CLKON_QMB] = BIT(25),
+ [WEIGHT_ARB] = BIT(26),
+ [GSI_IF] = BIT(27),
+ [CLKON_GLOBAL] = BIT(28),
+ [GLOBAL_2X_CLK] = BIT(29),
+ [DPL_FIFO] = BIT(30),
+ [DRBIP] = BIT(31),
+};
+
+IPA_REG_FIELDS(CLKON_CFG, clkon_cfg, 0x00000044);
+
+static const u32 ipa_reg_route_fmask[] = {
+ [ROUTE_DIS] = BIT(0),
+ [ROUTE_DEF_PIPE] = GENMASK(5, 1),
+ [ROUTE_DEF_HDR_TABLE] = BIT(6),
+ [ROUTE_DEF_HDR_OFST] = GENMASK(16, 7),
+ [ROUTE_FRAG_DEF_PIPE] = GENMASK(21, 17),
+ /* Bits 22-23 reserved */
+ [ROUTE_DEF_RETAIN_HDR] = BIT(24),
+ /* Bits 25-31 reserved */
+};
+
+IPA_REG_FIELDS(ROUTE, route, 0x00000048);
+
+static const u32 ipa_reg_shared_mem_size_fmask[] = {
+ [MEM_SIZE] = GENMASK(15, 0),
+ [MEM_BADDR] = GENMASK(31, 16),
+};
+
+IPA_REG_FIELDS(SHARED_MEM_SIZE, shared_mem_size, 0x00000054);
+
+static const u32 ipa_reg_qsb_max_writes_fmask[] = {
+ [GEN_QMB_0_MAX_WRITES] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_WRITES] = GENMASK(7, 4),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_FIELDS(QSB_MAX_WRITES, qsb_max_writes, 0x00000074);
+
+static const u32 ipa_reg_qsb_max_reads_fmask[] = {
+ [GEN_QMB_0_MAX_READS] = GENMASK(3, 0),
+ [GEN_QMB_1_MAX_READS] = GENMASK(7, 4),
+ /* Bits 8-15 reserved */
+ [GEN_QMB_0_MAX_READS_BEATS] = GENMASK(23, 16),
+ [GEN_QMB_1_MAX_READS_BEATS] = GENMASK(31, 24),
+};
+
+IPA_REG_FIELDS(QSB_MAX_READS, qsb_max_reads, 0x00000078);
+
+static const u32 ipa_reg_filt_rout_hash_en_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_EN, filt_rout_hash_en, 0x00000148);
+
+static const u32 ipa_reg_filt_rout_hash_flush_fmask[] = {
+ [IPV6_ROUTER_HASH] = BIT(0),
+ /* Bits 1-3 reserved */
+ [IPV6_FILTER_HASH] = BIT(4),
+ /* Bits 5-7 reserved */
+ [IPV4_ROUTER_HASH] = BIT(8),
+ /* Bits 9-11 reserved */
+ [IPV4_FILTER_HASH] = BIT(12),
+ /* Bits 13-31 reserved */
+};
+
+IPA_REG_FIELDS(FILT_ROUT_HASH_FLUSH, filt_rout_hash_flush, 0x0000014c);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(STATE_AGGR_ACTIVE, state_aggr_active, 0x000000b4);
+
+static const u32 ipa_reg_local_pkt_proc_cntxt_fmask[] = {
+ [IPA_BASE_ADDR] = GENMASK(17, 0),
+ /* Bits 18-31 reserved */
+};
+
+/* Offset must be a multiple of 8 */
+IPA_REG_FIELDS(LOCAL_PKT_PROC_CNTXT, local_pkt_proc_cntxt, 0x000001e8);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(AGGR_FORCE_CLOSE, aggr_force_close, 0x000001ec);
+
+static const u32 ipa_reg_ipa_tx_cfg_fmask[] = {
+ /* Bits 0-1 reserved */
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX0] = GENMASK(5, 2),
+ [DMAW_SCND_OUTSD_PRED_THRESHOLD] = GENMASK(9, 6),
+ [DMAW_SCND_OUTSD_PRED_EN] = BIT(10),
+ [DMAW_MAX_BEATS_256_DIS] = BIT(11),
+ [PA_MASK_EN] = BIT(12),
+ [PREFETCH_ALMOST_EMPTY_SIZE_TX1] = GENMASK(16, 13),
+ [DUAL_TX_ENABLE] = BIT(17),
+ [SSPND_PA_NO_START_STATE] = BIT(18),
+ /* Bits 19-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_TX_CFG, ipa_tx_cfg, 0x000001fc);
+
+static const u32 ipa_reg_flavor_0_fmask[] = {
+ [MAX_PIPES] = GENMASK(3, 0),
+ /* Bits 4-7 reserved */
+ [MAX_CONS_PIPES] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [MAX_PROD_PIPES] = GENMASK(20, 16),
+ /* Bits 21-23 reserved */
+ [PROD_LOWEST] = GENMASK(27, 24),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_FIELDS(FLAVOR_0, flavor_0, 0x00000210);
+
+static const u32 ipa_reg_idle_indication_cfg_fmask[] = {
+ [ENTER_IDLE_DEBOUNCE_THRESH] = GENMASK(15, 0),
+ [CONST_NON_IDLE_ENABLE] = BIT(16),
+ /* Bits 17-31 reserved */
+};
+
+IPA_REG_FIELDS(IDLE_INDICATION_CFG, idle_indication_cfg, 0x00000240);
+
+static const u32 ipa_reg_qtime_timestamp_cfg_fmask[] = {
+ [DPL_TIMESTAMP_LSB] = GENMASK(4, 0),
+ /* Bits 5-6 reserved */
+ [DPL_TIMESTAMP_SEL] = BIT(7),
+ [TAG_TIMESTAMP_LSB] = GENMASK(12, 8),
+ /* Bits 13-15 reserved */
+ [NAT_TIMESTAMP_LSB] = GENMASK(20, 16),
+ /* Bits 21-31 reserved */
+};
+
+IPA_REG_FIELDS(QTIME_TIMESTAMP_CFG, qtime_timestamp_cfg, 0x0000024c);
+
+static const u32 ipa_reg_timers_xo_clk_div_cfg_fmask[] = {
+ [DIV_VALUE] = GENMASK(8, 0),
+ /* Bits 9-30 reserved */
+ [DIV_ENABLE] = BIT(31),
+};
+
+IPA_REG_FIELDS(TIMERS_XO_CLK_DIV_CFG, timers_xo_clk_div_cfg, 0x00000250);
+
+static const u32 ipa_reg_timers_pulse_gran_cfg_fmask[] = {
+ [PULSE_GRAN_0] = GENMASK(2, 0),
+ [PULSE_GRAN_1] = GENMASK(5, 3),
+ [PULSE_GRAN_2] = GENMASK(8, 6),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_FIELDS(TIMERS_PULSE_GRAN_CFG, timers_pulse_gran_cfg, 0x00000254);
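+
+/* v4.9 reverts to two resource group pairs in each direction; there
+ * are no _45_RSRC_TYPE registers here, unlike v4.5.
+ */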
+
+static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type,
+ 0x00000400, 0x0020);
+
+static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type,
+ 0x00000404, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type,
+ 0x00000500, 0x0020);
+
+static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = {
+ [X_MIN_LIM] = GENMASK(5, 0),
+ /* Bits 6-7 reserved */
+ [X_MAX_LIM] = GENMASK(13, 8),
+ /* Bits 14-15 reserved */
+ [Y_MIN_LIM] = GENMASK(21, 16),
+ /* Bits 22-23 reserved */
+ [Y_MAX_LIM] = GENMASK(29, 24),
+ /* Bits 30-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type,
+ 0x00000504, 0x0020);
+
+static const u32 ipa_reg_endp_init_cfg_fmask[] = {
+ [FRAG_OFFLOAD_EN] = BIT(0),
+ [CS_OFFLOAD_EN] = GENMASK(2, 1),
+ [CS_METADATA_HDR_OFFSET] = GENMASK(6, 3),
+ /* Bit 7 reserved */
+ [CS_GEN_QMB_MASTER_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_CFG, endp_init_cfg, 0x00000808, 0x0070);
+
+static const u32 ipa_reg_endp_init_nat_fmask[] = {
+ [NAT_EN] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_NAT, endp_init_nat, 0x0000080c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_fmask[] = {
+ [HDR_LEN] = GENMASK(5, 0),
+ [HDR_OFST_METADATA_VALID] = BIT(6),
+ [HDR_OFST_METADATA] = GENMASK(12, 7),
+ [HDR_ADDITIONAL_CONST_LEN] = GENMASK(18, 13),
+ [HDR_OFST_PKT_SIZE_VALID] = BIT(19),
+ [HDR_OFST_PKT_SIZE] = GENMASK(25, 20),
+ /* Bit 26 reserved */
+ [HDR_LEN_INC_DEAGG_HDR] = BIT(27),
+ [HDR_LEN_MSB] = GENMASK(29, 28),
+ [HDR_OFST_METADATA_MSB] = GENMASK(31, 30),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR, endp_init_hdr, 0x00000810, 0x0070);
+
+static const u32 ipa_reg_endp_init_hdr_ext_fmask[] = {
+ [HDR_ENDIANNESS] = BIT(0),
+ [HDR_TOTAL_LEN_OR_PAD_VALID] = BIT(1),
+ [HDR_TOTAL_LEN_OR_PAD] = BIT(2),
+ [HDR_PAYLOAD_LEN_INC_PADDING] = BIT(3),
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET] = GENMASK(9, 4),
+ [HDR_PAD_TO_ALIGNMENT] = GENMASK(13, 10),
+ /* Bits 14-15 reserved */
+ [HDR_TOTAL_LEN_OR_PAD_OFFSET_MSB] = GENMASK(17, 16),
+ [HDR_OFST_PKT_SIZE_MSB] = GENMASK(19, 18),
+ [HDR_ADDITIONAL_CONST_LEN_MSB] = GENMASK(21, 20),
+ /* Bits 22-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HDR_EXT, endp_init_hdr_ext, 0x00000814, 0x0070);
+
+IPA_REG_STRIDE(ENDP_INIT_HDR_METADATA_MASK, endp_init_hdr_metadata_mask,
+ 0x00000818, 0x0070);
+
+static const u32 ipa_reg_endp_init_mode_fmask[] = {
+ [ENDP_MODE] = GENMASK(2, 0),
+ [DCPH_ENABLE] = BIT(3),
+ [DEST_PIPE_INDEX] = GENMASK(8, 4),
+ /* Bits 9-11 reserved */
+ [BYTE_THRESHOLD] = GENMASK(27, 12),
+ [PIPE_REPLICATION_EN] = BIT(28),
+ [PAD_EN] = BIT(29),
+ [DRBIP_ACL_ENABLE] = BIT(30),
+ /* Bit 31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_MODE, endp_init_mode, 0x00000820, 0x0070);
+
+static const u32 ipa_reg_endp_init_aggr_fmask[] = {
+ [AGGR_EN] = GENMASK(1, 0),
+ [AGGR_TYPE] = GENMASK(4, 2),
+ [BYTE_LIMIT] = GENMASK(10, 5),
+ /* Bit 11 reserved */
+ [TIME_LIMIT] = GENMASK(16, 12),
+ [PKT_LIMIT] = GENMASK(22, 17),
+ [SW_EOF_ACTIVE] = BIT(23),
+ [FORCE_CLOSE] = BIT(24),
+ /* Bit 25 reserved */
+ [HARD_BYTE_LIMIT_EN] = BIT(26),
+ [AGGR_GRAN_SEL] = BIT(27),
+ /* Bits 28-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_AGGR, endp_init_aggr, 0x00000824, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_en_fmask[] = {
+ [HOL_BLOCK_EN] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_EN, endp_init_hol_block_en,
+ 0x0000082c, 0x0070);
+
+static const u32 ipa_reg_endp_init_hol_block_timer_fmask[] = {
+ [TIMER_LIMIT] = GENMASK(4, 0),
+ /* Bits 5-7 reserved */
+ [TIMER_GRAN_SEL] = BIT(8),
+ /* Bits 9-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_HOL_BLOCK_TIMER, endp_init_hol_block_timer,
+ 0x00000830, 0x0070);
+
+static const u32 ipa_reg_endp_init_deaggr_fmask[] = {
+ [DEAGGR_HDR_LEN] = GENMASK(5, 0),
+ [SYSPIPE_ERR_DETECTION] = BIT(6),
+ [PACKET_OFFSET_VALID] = BIT(7),
+ [PACKET_OFFSET_LOCATION] = GENMASK(13, 8),
+ [IGNORE_MIN_PKT_ERR] = BIT(14),
+ /* Bit 15 reserved */
+ [MAX_PACKET_LEN] = GENMASK(31, 16),
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_DEAGGR, endp_init_deaggr, 0x00000834, 0x0070);
+
+static const u32 ipa_reg_endp_init_rsrc_grp_fmask[] = {
+ [ENDP_RSRC_GRP] = GENMASK(1, 0),
+ /* Bits 2-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_RSRC_GRP, endp_init_rsrc_grp,
+ 0x00000838, 0x0070);
+
+static const u32 ipa_reg_endp_init_seq_fmask[] = {
+ [SEQ_TYPE] = GENMASK(7, 0),
+ /* Bits 8-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_INIT_SEQ, endp_init_seq, 0x0000083c, 0x0070);
+
+static const u32 ipa_reg_endp_status_fmask[] = {
+ [STATUS_EN] = BIT(0),
+ [STATUS_ENDP] = GENMASK(5, 1),
+ /* Bits 6-8 reserved */
+ [STATUS_PKT_SUPPRESS] = BIT(9),
+ /* Bits 10-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_STATUS, endp_status, 0x00000840, 0x0070);
+
+static const u32 ipa_reg_endp_filter_router_hsh_cfg_fmask[] = {
+ [FILTER_HASH_MSK_SRC_ID] = BIT(0),
+ [FILTER_HASH_MSK_SRC_IP] = BIT(1),
+ [FILTER_HASH_MSK_DST_IP] = BIT(2),
+ [FILTER_HASH_MSK_SRC_PORT] = BIT(3),
+ [FILTER_HASH_MSK_DST_PORT] = BIT(4),
+ [FILTER_HASH_MSK_PROTOCOL] = BIT(5),
+ [FILTER_HASH_MSK_METADATA] = BIT(6),
+ [FILTER_HASH_MSK_ALL] = GENMASK(6, 0),
+ /* Bits 7-15 reserved */
+ [ROUTER_HASH_MSK_SRC_ID] = BIT(16),
+ [ROUTER_HASH_MSK_SRC_IP] = BIT(17),
+ [ROUTER_HASH_MSK_DST_IP] = BIT(18),
+ [ROUTER_HASH_MSK_SRC_PORT] = BIT(19),
+ [ROUTER_HASH_MSK_DST_PORT] = BIT(20),
+ [ROUTER_HASH_MSK_PROTOCOL] = BIT(21),
+ [ROUTER_HASH_MSK_METADATA] = BIT(22),
+ [ROUTER_HASH_MSK_ALL] = GENMASK(22, 16),
+ /* Bits 23-31 reserved */
+};
+
+IPA_REG_STRIDE_FIELDS(ENDP_FILTER_ROUTER_HSH_CFG, endp_filter_router_hsh_cfg,
+ 0x0000085c, 0x0070);
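+
+/* On v4.9 the interrupt registers are 0x4000-based, rather than the
+ * 0x3000-based offsets used for v4.2 and v4.5.
+ */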
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_STTS, ipa_irq_stts, 0x00004008 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_EN, ipa_irq_en, 0x0000400c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by enum ipa_irq_id; only used for GSI_EE_AP */
+IPA_REG(IPA_IRQ_CLR, ipa_irq_clr, 0x00004010 + 0x1000 * GSI_EE_AP);
+
+static const u32 ipa_reg_ipa_irq_uc_fmask[] = {
+ [UC_INTR] = BIT(0),
+ /* Bits 1-31 reserved */
+};
+
+IPA_REG_FIELDS(IPA_IRQ_UC, ipa_irq_uc, 0x0000401c + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_INFO, irq_suspend_info, 0x00004030 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_EN, irq_suspend_en, 0x00004034 + 0x1000 * GSI_EE_AP);
+
+/* Valid bits defined by ipa->available */
+IPA_REG(IRQ_SUSPEND_CLR, irq_suspend_clr, 0x00004038 + 0x1000 * GSI_EE_AP);
+
+static const struct ipa_reg *ipa_reg_array[] = {
+ [COMP_CFG] = &ipa_reg_comp_cfg,
+ [CLKON_CFG] = &ipa_reg_clkon_cfg,
+ [ROUTE] = &ipa_reg_route,
+ [SHARED_MEM_SIZE] = &ipa_reg_shared_mem_size,
+ [QSB_MAX_WRITES] = &ipa_reg_qsb_max_writes,
+ [QSB_MAX_READS] = &ipa_reg_qsb_max_reads,
+ [FILT_ROUT_HASH_EN] = &ipa_reg_filt_rout_hash_en,
+ [FILT_ROUT_HASH_FLUSH] = &ipa_reg_filt_rout_hash_flush,
+ [STATE_AGGR_ACTIVE] = &ipa_reg_state_aggr_active,
+ [LOCAL_PKT_PROC_CNTXT] = &ipa_reg_local_pkt_proc_cntxt,
+ [AGGR_FORCE_CLOSE] = &ipa_reg_aggr_force_close,
+ [IPA_TX_CFG] = &ipa_reg_ipa_tx_cfg,
+ [FLAVOR_0] = &ipa_reg_flavor_0,
+ [IDLE_INDICATION_CFG] = &ipa_reg_idle_indication_cfg,
+ [QTIME_TIMESTAMP_CFG] = &ipa_reg_qtime_timestamp_cfg,
+ [TIMERS_XO_CLK_DIV_CFG] = &ipa_reg_timers_xo_clk_div_cfg,
+ [TIMERS_PULSE_GRAN_CFG] = &ipa_reg_timers_pulse_gran_cfg,
+ [SRC_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_01_rsrc_type,
+ [SRC_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_src_rsrc_grp_23_rsrc_type,
+ [DST_RSRC_GRP_01_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_01_rsrc_type,
+ [DST_RSRC_GRP_23_RSRC_TYPE] = &ipa_reg_dst_rsrc_grp_23_rsrc_type,
+ [ENDP_INIT_CFG] = &ipa_reg_endp_init_cfg,
+ [ENDP_INIT_NAT] = &ipa_reg_endp_init_nat,
+ [ENDP_INIT_HDR] = &ipa_reg_endp_init_hdr,
+ [ENDP_INIT_HDR_EXT] = &ipa_reg_endp_init_hdr_ext,
+ [ENDP_INIT_HDR_METADATA_MASK] = &ipa_reg_endp_init_hdr_metadata_mask,
+ [ENDP_INIT_MODE] = &ipa_reg_endp_init_mode,
+ [ENDP_INIT_AGGR] = &ipa_reg_endp_init_aggr,
+ [ENDP_INIT_HOL_BLOCK_EN] = &ipa_reg_endp_init_hol_block_en,
+ [ENDP_INIT_HOL_BLOCK_TIMER] = &ipa_reg_endp_init_hol_block_timer,
+ [ENDP_INIT_DEAGGR] = &ipa_reg_endp_init_deaggr,
+ [ENDP_INIT_RSRC_GRP] = &ipa_reg_endp_init_rsrc_grp,
+ [ENDP_INIT_SEQ] = &ipa_reg_endp_init_seq,
+ [ENDP_STATUS] = &ipa_reg_endp_status,
+ [ENDP_FILTER_ROUTER_HSH_CFG] = &ipa_reg_endp_filter_router_hsh_cfg,
+ [IPA_IRQ_STTS] = &ipa_reg_ipa_irq_stts,
+ [IPA_IRQ_EN] = &ipa_reg_ipa_irq_en,
+ [IPA_IRQ_CLR] = &ipa_reg_ipa_irq_clr,
+ [IPA_IRQ_UC] = &ipa_reg_ipa_irq_uc,
+ [IRQ_SUSPEND_INFO] = &ipa_reg_irq_suspend_info,
+ [IRQ_SUSPEND_EN] = &ipa_reg_irq_suspend_en,
+ [IRQ_SUSPEND_CLR] = &ipa_reg_irq_suspend_clr,
+};
+
+const struct ipa_regs ipa_regs_v4_9 = {
+ .reg_count = ARRAY_SIZE(ipa_reg_array),
+ .reg = ipa_reg_array,
+};