Diffstat (limited to 'drivers/net/ipa/ipa_endpoint.c')
-rw-r--r--   drivers/net/ipa/ipa_endpoint.c   923
1 file changed, 525 insertions, 398 deletions
diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 03a170993420..093e11ec7c2d 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2021 Linaro Ltd. + * Copyright (C) 2019-2022 Linaro Ltd. */ #include <linux/types.h> @@ -23,12 +23,8 @@ #include "ipa_gsi.h" #include "ipa_power.h" -#define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0) - -#define IPA_REPLENISH_BATCH 16 - -/* RX buffer is 1 page (or a power-of-2 contiguous pages) */ -#define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */ +/* Hardware is told about receive buffers once a "batch" has been queued */ +#define IPA_REPLENISH_BATCH 16 /* Must be non-zero */ /* The amount of RX buffer space consumed by standard skb overhead */ #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0)) @@ -37,7 +33,6 @@ #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */ #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3 -#define IPA_AGGR_TIME_LIMIT 500 /* microseconds */ /** enum ipa_status_opcode - status element opcode hardware values */ enum ipa_status_opcode { @@ -75,6 +70,24 @@ struct ipa_status { #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22) #define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16) +/* Compute the aggregation size value to use for a given buffer size */ +static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit) +{ + /* A hard aggregation limit will not be crossed; aggregation closes + * if saving incoming data would cross the hard byte limit boundary. + * + * With a soft limit, aggregation closes *after* the size boundary + * has been crossed. In that case the limit must leave enough space + * after that limit to receive a full MTU of data plus overhead. 
+ */ + if (!aggr_hard_limit) + rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; + + /* The byte limit is encoded as a number of kilobytes */ + + return rx_buffer_size / SZ_1K; +} + static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, const struct ipa_gsi_endpoint_data *all_data, const struct ipa_gsi_endpoint_data *data) @@ -87,6 +100,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, return true; if (!data->toward_ipa) { + const struct ipa_endpoint_rx *rx_config; + const struct ipa_reg *reg; + u32 buffer_size; + u32 aggr_size; + u32 limit; + if (data->endpoint.filter_support) { dev_err(dev, "filtering not supported for " "RX endpoint %u\n", @@ -94,9 +113,77 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, return false; } + /* Nothing more to check for non-AP RX */ + if (data->ee_id != GSI_EE_AP) + return true; + + rx_config = &data->endpoint.config.rx; + + /* The buffer size must hold an MTU plus overhead */ + buffer_size = rx_config->buffer_size; + limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD; + if (buffer_size < limit) { + dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n", + data->endpoint_id, buffer_size, limit); + return false; + } + + if (!data->endpoint.config.aggregation) { + bool result = true; + + /* No aggregation; check for bogus aggregation data */ + if (rx_config->aggr_time_limit) { + dev_err(dev, + "time limit with no aggregation for RX endpoint %u\n", + data->endpoint_id); + result = false; + } + + if (rx_config->aggr_hard_limit) { + dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n", + data->endpoint_id); + result = false; + } + + if (rx_config->aggr_close_eof) { + dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n", + data->endpoint_id); + result = false; + } + + return result; /* Nothing more to check */ + } + + /* For an endpoint supporting receive aggregation, the byte + * limit defines the point at which aggregation closes. This + * check ensures the receive buffer size doesn't result in a + * limit that exceeds what's representable in the aggregation + * byte limit field. 
+ */ + aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, + rx_config->aggr_hard_limit); + reg = ipa_reg(ipa, ENDP_INIT_AGGR); + + limit = ipa_reg_field_max(reg, BYTE_LIMIT); + if (aggr_size > limit) { + dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n", + data->endpoint_id, aggr_size, limit); + + return false; + } + return true; /* Nothing more to check for RX */ } + /* Starting with IPA v4.5 sequencer replication is obsolete */ + if (ipa->version >= IPA_VERSION_4_5) { + if (data->endpoint.config.tx.seq_rep_type) { + dev_err(dev, "no-zero seq_rep_type TX endpoint %u\n", + data->endpoint_id); + return false; + } + } + if (data->endpoint.config.status_enable) { other_name = data->endpoint.config.tx.status_endpoint; if (other_name >= count) { @@ -156,21 +243,12 @@ static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count, return true; } -static u32 aggr_byte_limit_max(enum ipa_version version) -{ - if (version < IPA_VERSION_4_5) - return field_max(aggr_byte_limit_fmask(true)); - - return field_max(aggr_byte_limit_fmask(false)); -} - static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, const struct ipa_gsi_endpoint_data *data) { const struct ipa_gsi_endpoint_data *dp = data; struct device *dev = &ipa->pdev->dev; enum ipa_endpoint_name name; - u32 limit; if (count > IPA_ENDPOINT_COUNT) { dev_err(dev, "too many endpoints specified (%u > %u)\n", @@ -178,26 +256,6 @@ static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count, return false; } - /* The aggregation byte limit defines the point at which an - * aggregation window will close. It is programmed into the - * IPA hardware as a number of KB. We don't use "hard byte - * limit" aggregation, which means that we need to supply - * enough space in a receive buffer to hold a complete MTU - * plus normal skb overhead *after* that aggregation byte - * limit has been crossed. - * - * This check ensures we don't define a receive buffer size - * that would exceed what we can represent in the field that - * is used to program its size. - */ - limit = aggr_byte_limit_max(ipa->version) * SZ_1K; - limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD; - if (limit < IPA_RX_BUFFER_SIZE) { - dev_err(dev, "buffer size too big for aggregation (%u > %u)\n", - IPA_RX_BUFFER_SIZE, limit); - return false; - } - /* Make sure needed endpoints have defined data */ if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) { dev_err(dev, "command TX endpoint not defined\n"); @@ -237,28 +295,32 @@ static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint, } /* suspend_delay represents suspend for RX, delay for TX endpoints. - * Note that suspend is not supported starting with IPA v4.0. + * Note that suspend is not supported starting with IPA v4.0, and + * delay mode should not be used starting with IPA v4.2. */ static bool ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) { - u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id); struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; + u32 field_id; + u32 offset; bool state; u32 mask; u32 val; - /* Suspend is not supported for IPA v4.0+. Delay doesn't work - * correctly on IPA v4.2. - */ if (endpoint->toward_ipa) - WARN_ON(ipa->version == IPA_VERSION_4_2); + WARN_ON(ipa->version >= IPA_VERSION_4_2); else WARN_ON(ipa->version >= IPA_VERSION_4_0); - mask = endpoint->toward_ipa ? 
ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK; - + reg = ipa_reg(ipa, ENDP_INIT_CTRL); + offset = ipa_reg_n_offset(reg, endpoint->endpoint_id); val = ioread32(ipa->reg_virt + offset); + + field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND; + mask = ipa_reg_bit(reg, field_id); + state = !!(val & mask); /* Don't bother if it's already in the requested state */ @@ -270,28 +332,28 @@ ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay) return state; } -/* We currently don't care what the previous state was for delay mode */ +/* We don't care what the previous state was for delay mode */ static void ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable) { + /* Delay mode should not be used for IPA v4.2+ */ + WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2); WARN_ON(!endpoint->toward_ipa); - /* Delay mode doesn't work properly for IPA v4.2 */ - if (endpoint->ipa->version != IPA_VERSION_4_2) - (void)ipa_endpoint_init_ctrl(endpoint, enable); + (void)ipa_endpoint_init_ctrl(endpoint, enable); } static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint) { u32 mask = BIT(endpoint->endpoint_id); struct ipa *ipa = endpoint->ipa; - u32 offset; + const struct ipa_reg *reg; u32 val; WARN_ON(!(mask & ipa->available)); - offset = ipa_reg_state_aggr_active_offset(ipa->version); - val = ioread32(ipa->reg_virt + offset); + reg = ipa_reg(ipa, STATE_AGGR_ACTIVE); + val = ioread32(ipa->reg_virt + ipa_reg_offset(reg)); return !!(val & mask); } @@ -300,10 +362,12 @@ static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint) { u32 mask = BIT(endpoint->endpoint_id); struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; WARN_ON(!(mask & ipa->available)); - iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET); + reg = ipa_reg(ipa, AGGR_FORCE_CLOSE); + iowrite32(mask, ipa->reg_virt + ipa_reg_offset(reg)); } /** @@ -319,7 +383,7 @@ static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint) { struct ipa *ipa = endpoint->ipa; - if (!endpoint->data->aggregation) + if (!endpoint->config.aggregation) return; /* Nothing to do if the endpoint doesn't have aggregation open */ @@ -355,26 +419,29 @@ ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable) return suspended; } -/* Enable or disable delay or suspend mode on all modem endpoints */ +/* Put all modem RX endpoints into suspend mode, and stop transmission + * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is + * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow + * control instead. + */ void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable) { u32 endpoint_id; - /* DELAY mode doesn't work correctly on IPA v4.2 */ - if (ipa->version == IPA_VERSION_4_2) - return; - for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) { struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id]; if (endpoint->ee_id != GSI_EE_MODEM) continue; - /* Set TX delay mode or RX suspend mode */ - if (endpoint->toward_ipa) + if (!endpoint->toward_ipa) + (void)ipa_endpoint_program_suspend(endpoint, enable); + else if (ipa->version < IPA_VERSION_4_2) ipa_endpoint_program_delay(endpoint, enable); else - (void)ipa_endpoint_program_suspend(endpoint, enable); + gsi_modem_channel_flow_control(&ipa->gsi, + endpoint->channel_id, + enable); } } @@ -385,12 +452,10 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) struct gsi_trans *trans; u32 count; - /* We need one command per modem TX endpoint. 
We can get an upper - * bound on that by assuming all initialized endpoints are modem->IPA. - * That won't happen, and we could be more precise, but this is fine - * for now. End the transaction with commands to clear the pipeline. + /* We need one command per modem TX endpoint, plus the commands + * that clear the pipeline. */ - count = hweight32(initialized) + ipa_cmd_pipeline_clear_count(); + count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count(); trans = ipa_cmd_trans_alloc(ipa, count); if (!trans) { dev_err(&ipa->pdev->dev, @@ -401,6 +466,7 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) while (initialized) { u32 endpoint_id = __ffs(initialized); struct ipa_endpoint *endpoint; + const struct ipa_reg *reg; u32 offset; initialized ^= BIT(endpoint_id); @@ -410,7 +476,8 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa)) continue; - offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); + reg = ipa_reg(ipa, ENDP_STATUS); + offset = ipa_reg_n_offset(reg, endpoint_id); /* Value written is 0, and all bits are updated. That * means status is disabled on the endpoint, and as a @@ -421,7 +488,6 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) ipa_cmd_pipeline_clear_add(trans); - /* XXX This should have a 1 second timeout */ gsi_trans_commit_wait(trans); ipa_cmd_pipeline_clear_wait(ipa); @@ -431,22 +497,23 @@ int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa) static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) { - u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id); + u32 endpoint_id = endpoint->endpoint_id; + struct ipa *ipa = endpoint->ipa; enum ipa_cs_offload_en enabled; + const struct ipa_reg *reg; u32 val = 0; + reg = ipa_reg(ipa, ENDP_INIT_CFG); /* FRAG_OFFLOAD_EN is 0 */ - if (endpoint->data->checksum) { - enum ipa_version version = endpoint->ipa->version; + if (endpoint->config.checksum) { + enum ipa_version version = ipa->version; if (endpoint->toward_ipa) { - u32 checksum_offset; + u32 off; /* Checksum header offset is in 4-byte units */ - checksum_offset = sizeof(struct rmnet_map_header); - checksum_offset /= sizeof(u32); - val |= u32_encode_bits(checksum_offset, - CS_METADATA_HDR_OFFSET_FMASK); + off = sizeof(struct rmnet_map_header) / sizeof(u32); + val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off); enabled = version < IPA_VERSION_4_5 ? 
IPA_CS_OFFLOAD_UL @@ -459,24 +526,26 @@ static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint) } else { enabled = IPA_CS_OFFLOAD_NONE; } - val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK); + val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled); /* CS_GEN_QMB_MASTER_SEL is 0 */ - iowrite32(val, endpoint->ipa->reg_virt + offset); + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint) { - u32 offset; + u32 endpoint_id = endpoint->endpoint_id; + struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; u32 val; if (!endpoint->toward_ipa) return; - offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id); - val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK); + reg = ipa_reg(ipa, ENDP_INIT_NAT); + val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS); - iowrite32(val, endpoint->ipa->reg_virt + offset); + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } static u32 @@ -485,7 +554,7 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint) u32 header_size = sizeof(struct rmnet_map_header); /* Without checksum offload, we just have the MAP header */ - if (!endpoint->data->checksum) + if (!endpoint->config.checksum) return header_size; if (version < IPA_VERSION_4_5) { @@ -500,6 +569,50 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint) return header_size; } +/* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */ +static u32 ipa_header_size_encode(enum ipa_version version, + const struct ipa_reg *reg, u32 header_size) +{ + u32 field_max = ipa_reg_field_max(reg, HDR_LEN); + u32 val; + + /* We know field_max can be used as a mask (2^n - 1) */ + val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max); + if (version < IPA_VERSION_4_5) { + WARN_ON(header_size > field_max); + return val; + } + + /* IPA v4.5 adds a few more most-significant bits */ + header_size >>= hweight32(field_max); + WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB)); + val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size); + + return val; +} + +/* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */ +static u32 ipa_metadata_offset_encode(enum ipa_version version, + const struct ipa_reg *reg, u32 offset) +{ + u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA); + u32 val; + + /* We know field_max can be used as a mask (2^n - 1) */ + val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset); + if (version < IPA_VERSION_4_5) { + WARN_ON(offset > field_max); + return val; + } + + /* IPA v4.5 adds a few more most-significant bits */ + offset >>= hweight32(field_max); + WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB)); + val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset); + + return val; +} + /** * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register * @endpoint: Endpoint pointer @@ -523,36 +636,38 @@ ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint) */ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) { - u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id); + u32 endpoint_id = endpoint->endpoint_id; struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; u32 val = 0; - if (endpoint->data->qmap) { + reg = ipa_reg(ipa, ENDP_INIT_HDR); + if (endpoint->config.qmap) { enum ipa_version version = ipa->version; size_t header_size; header_size = ipa_qmap_header_size(version, endpoint); - val = ipa_header_size_encoded(version, 
header_size); + val = ipa_header_size_encode(version, reg, header_size); /* Define how to fill fields in a received QMAP header */ if (!endpoint->toward_ipa) { - u32 offset; /* Field offset within header */ + u32 off; /* Field offset within header */ /* Where IPA will write the metadata value */ - offset = offsetof(struct rmnet_map_header, mux_id); - val |= ipa_metadata_offset_encoded(version, offset); + off = offsetof(struct rmnet_map_header, mux_id); + val |= ipa_metadata_offset_encode(version, reg, off); /* Where IPA will write the length */ - offset = offsetof(struct rmnet_map_header, pkt_len); + off = offsetof(struct rmnet_map_header, pkt_len); /* Upper bits are stored in HDR_EXT with IPA v4.5 */ if (version >= IPA_VERSION_4_5) - offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK); + off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE); - val |= HDR_OFST_PKT_SIZE_VALID_FMASK; - val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK); + val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID); + val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off); } /* For QMAP TX, metadata offset is 0 (modem assumes this) */ - val |= HDR_OFST_METADATA_VALID_FMASK; + val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID); /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */ /* HDR_A5_MUX is 0 */ @@ -560,228 +675,211 @@ static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint) /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */ } - iowrite32(val, ipa->reg_virt + offset); + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) { - u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id); - u32 pad_align = endpoint->data->rx.pad_align; + u32 pad_align = endpoint->config.rx.pad_align; + u32 endpoint_id = endpoint->endpoint_id; struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; u32 val = 0; - val |= HDR_ENDIANNESS_FMASK; /* big endian */ - - /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet - * driver assumes this field is meaningful in packets it receives, - * and assumes the header's payload length includes that padding. - * The RMNet driver does *not* pad packets it sends, however, so - * the pad field (although 0) should be ignored. - */ - if (endpoint->data->qmap && !endpoint->toward_ipa) { - val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK; - /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */ - val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK; - /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */ + reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT); + if (endpoint->config.qmap) { + /* We have a header, so we must specify its endianness */ + val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */ + + /* A QMAP header contains a 6 bit pad field at offset 0. + * The RMNet driver assumes this field is meaningful in + * packets it receives, and assumes the header's payload + * length includes that padding. The RMNet driver does + * *not* pad packets it sends, however, so the pad field + * (although 0) should be ignored. 
+ */ + if (!endpoint->toward_ipa) { + val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID); + /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */ + val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING); + /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */ + } } /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */ if (!endpoint->toward_ipa) - val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK); + val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align); /* IPA v4.5 adds some most-significant bits to a few fields, * two of which are defined in the HDR (not HDR_EXT) register. */ if (ipa->version >= IPA_VERSION_4_5) { /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */ - if (endpoint->data->qmap && !endpoint->toward_ipa) { - u32 offset; - - offset = offsetof(struct rmnet_map_header, pkt_len); - offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK); - val |= u32_encode_bits(offset, - HDR_OFST_PKT_SIZE_MSB_FMASK); + if (endpoint->config.qmap && !endpoint->toward_ipa) { + u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE); + u32 off; /* Field offset within header */ + + off = offsetof(struct rmnet_map_header, pkt_len); + /* Low bits are in the ENDP_INIT_HDR register */ + off >>= hweight32(mask); + val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off); /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */ } } - iowrite32(val, ipa->reg_virt + offset); + + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) { u32 endpoint_id = endpoint->endpoint_id; + struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; u32 val = 0; u32 offset; if (endpoint->toward_ipa) return; /* Register not valid for TX endpoints */ - offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id); + reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK); + offset = ipa_reg_n_offset(reg, endpoint_id); /* Note that HDR_ENDIANNESS indicates big endian header fields */ - if (endpoint->data->qmap) + if (endpoint->config.qmap) val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); - iowrite32(val, endpoint->ipa->reg_virt + offset); + iowrite32(val, ipa->reg_virt + offset); } static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint) { - u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id); + struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; + u32 offset; u32 val; if (!endpoint->toward_ipa) return; /* Register not valid for RX endpoints */ - if (endpoint->data->dma_mode) { - enum ipa_endpoint_name name = endpoint->data->dma_endpoint; - u32 dma_endpoint_id; + reg = ipa_reg(ipa, ENDP_INIT_MODE); + if (endpoint->config.dma_mode) { + enum ipa_endpoint_name name = endpoint->config.dma_endpoint; + u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id; - dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id; - - val = u32_encode_bits(IPA_DMA, MODE_FMASK); - val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK); + val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA); + val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id); } else { - val = u32_encode_bits(IPA_BASIC, MODE_FMASK); + val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC); } /* All other bits unspecified (and 0) */ - iowrite32(val, endpoint->ipa->reg_virt + offset); + offset = ipa_reg_n_offset(reg, endpoint->endpoint_id); + iowrite32(val, ipa->reg_virt + offset); } -/* Compute the aggregation size value to use for a given buffer size */ -static u32 ipa_aggr_size_kb(u32 rx_buffer_size) +/* For IPA v4.5+, times are expressed using Qtime. 
The AP uses one of two + * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config() + * they're configured to have granularity 100 usec and 1 msec, respectively. + * + * The return value is the positive or negative Qtime value to use to + * express the (microsecond) time provided. A positive return value + * means pulse generator 0 can be used; otherwise use pulse generator 1. + */ +static int ipa_qtime_val(u32 microseconds, u32 max) { - /* We don't use "hard byte limit" aggregation, so we define the - * aggregation limit such that our buffer has enough space *after* - * that limit to receive a full MTU of data, plus overhead. - */ - rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD; + u32 val; - return rx_buffer_size / SZ_1K; -} + /* Use 100 microsecond granularity if possible */ + val = DIV_ROUND_CLOSEST(microseconds, 100); + if (val <= max) + return (int)val; -/* Encoded values for AGGR endpoint register fields */ -static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit) -{ - if (version < IPA_VERSION_4_5) - return u32_encode_bits(limit, aggr_byte_limit_fmask(true)); + /* Have to use pulse generator 1 (millisecond granularity) */ + val = DIV_ROUND_CLOSEST(microseconds, 1000); + WARN_ON(val > max); - return u32_encode_bits(limit, aggr_byte_limit_fmask(false)); + return (int)-val; } /* Encode the aggregation timer limit (microseconds) based on IPA version */ -static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit) +static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg, + u32 microseconds) { - u32 gran_sel; - u32 fmask; + u32 max; u32 val; - if (version < IPA_VERSION_4_5) { - /* We set aggregation granularity in ipa_hardware_config() */ - limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY); + if (!microseconds) + return 0; /* Nothing to compute if time limit is 0 */ - return u32_encode_bits(limit, aggr_time_limit_fmask(true)); - } + max = ipa_reg_field_max(reg, TIME_LIMIT); + if (ipa->version >= IPA_VERSION_4_5) { + u32 gran_sel; + int ret; + + /* Compute the Qtime limit value to use */ + ret = ipa_qtime_val(microseconds, max); + if (ret < 0) { + val = -ret; + gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL); + } else { + val = ret; + gran_sel = 0; + } - /* IPA v4.5 expresses the time limit using Qtime. The AP has - * pulse generators 0 and 1 available, which were configured - * in ipa_qtime_config() to have granularity 100 usec and - * 1 msec, respectively. Use pulse generator 0 if possible, - * otherwise fall back to pulse generator 1. - */ - fmask = aggr_time_limit_fmask(false); - val = DIV_ROUND_CLOSEST(limit, 100); - if (val > field_max(fmask)) { - /* Have to use pulse generator 1 (millisecond granularity) */ - gran_sel = AGGR_GRAN_SEL_FMASK; - val = DIV_ROUND_CLOSEST(limit, 1000); - } else { - /* We can use pulse generator 0 (100 usec granularity) */ - gran_sel = 0; + return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val); } - return gran_sel | u32_encode_bits(val, fmask); -} + /* We program aggregation granularity in ipa_hardware_config() */ + val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY); + WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n", + microseconds, max * IPA_AGGR_GRANULARITY); -static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled) -{ - u32 val = enabled ? 
1 : 0; - - if (version < IPA_VERSION_4_5) - return u32_encode_bits(val, aggr_sw_eof_active_fmask(true)); - - return u32_encode_bits(val, aggr_sw_eof_active_fmask(false)); + return ipa_reg_encode(reg, TIME_LIMIT, val); } static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint) { - u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id); - enum ipa_version version = endpoint->ipa->version; + u32 endpoint_id = endpoint->endpoint_id; + struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; u32 val = 0; - if (endpoint->data->aggregation) { + reg = ipa_reg(ipa, ENDP_INIT_AGGR); + if (endpoint->config.aggregation) { if (!endpoint->toward_ipa) { - bool close_eof; + const struct ipa_endpoint_rx *rx_config; + u32 buffer_size; u32 limit; - val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK); - val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK); + rx_config = &endpoint->config.rx; + val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR); + val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC); - limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE); - val |= aggr_byte_limit_encoded(version, limit); + buffer_size = rx_config->buffer_size; + limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, + rx_config->aggr_hard_limit); + val |= ipa_reg_encode(reg, BYTE_LIMIT, limit); - limit = IPA_AGGR_TIME_LIMIT; - val |= aggr_time_limit_encoded(version, limit); + limit = rx_config->aggr_time_limit; + val |= aggr_time_limit_encode(ipa, reg, limit); /* AGGR_PKT_LIMIT is 0 (unlimited) */ - close_eof = endpoint->data->rx.aggr_close_eof; - val |= aggr_sw_eof_active_encoded(version, close_eof); - - /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */ + if (rx_config->aggr_close_eof) + val |= ipa_reg_bit(reg, SW_EOF_ACTIVE); } else { - val |= u32_encode_bits(IPA_ENABLE_DEAGGR, - AGGR_EN_FMASK); - val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK); + val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR); + val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP); /* other fields ignored */ } /* AGGR_FORCE_CLOSE is 0 */ /* AGGR_GRAN_SEL is 0 for IPA v4.5 */ } else { - val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK); + val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR); /* other fields ignored */ } - iowrite32(val, endpoint->ipa->reg_virt + offset); -} - -/* Return the Qtime-based head-of-line blocking timer value that - * represents the given number of microseconds. The result - * includes both the timer value and the selected timer granularity. - */ -static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds) -{ - u32 gran_sel; - u32 val; - - /* IPA v4.5 expresses time limits using Qtime. The AP has - * pulse generators 0 and 1 available, which were configured - * in ipa_qtime_config() to have granularity 100 usec and - * 1 msec, respectively. Use pulse generator 0 if possible, - * otherwise fall back to pulse generator 1. - */ - val = DIV_ROUND_CLOSEST(microseconds, 100); - if (val > field_max(TIME_LIMIT_FMASK)) { - /* Have to use pulse generator 1 (millisecond granularity) */ - gran_sel = GRAN_SEL_FMASK; - val = DIV_ROUND_CLOSEST(microseconds, 1000); - } else { - /* We can use pulse generator 0 (100 usec granularity) */ - gran_sel = 0; - } - - return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK); + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } /* The head-of-line blocking timer is defined as a tick count. For @@ -789,12 +887,11 @@ static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds) * derived from the 19.2 MHz SoC XO clock. 
For older IPA versions * each tick represents 128 cycles of the IPA core clock. * - * Return the encoded value that should be written to that register - * that represents the timeout period provided. For IPA v4.2 this - * encodes a base and scale value, while for earlier versions the - * value is a simple tick count. + * Return the encoded value representing the timeout period provided + * that should be written to the ENDP_INIT_HOL_BLOCK_TIMER register. */ -static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) +static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg, + u32 microseconds) { u32 width; u32 scale; @@ -806,18 +903,34 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) if (!microseconds) return 0; /* Nothing to compute if timer period is 0 */ - if (ipa->version >= IPA_VERSION_4_5) - return hol_block_timer_qtime_val(ipa, microseconds); + if (ipa->version >= IPA_VERSION_4_5) { + u32 max = ipa_reg_field_max(reg, TIMER_LIMIT); + u32 gran_sel; + int ret; + + /* Compute the Qtime limit value to use */ + ret = ipa_qtime_val(microseconds, max); + if (ret < 0) { + val = -ret; + gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL); + } else { + val = ret; + gran_sel = 0; + } + + return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val); + } - /* Use 64 bit arithmetic to avoid overflow... */ + /* Use 64 bit arithmetic to avoid overflow */ rate = ipa_core_clock_rate(ipa); ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC); - /* ...but we still need to fit into a 32-bit register */ - WARN_ON(ticks > U32_MAX); + + /* We still need the result to fit into the field */ + WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE)); /* IPA v3.5.1 through v4.1 just record the tick count */ if (ipa->version < IPA_VERSION_4_2) - return (u32)ticks; + return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks); /* For IPA v4.2, the tick count is represented by base and * scale fields within the 32-bit timer register, where: @@ -827,8 +940,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) * count, and extract the number of bits in the base field * such that high bit is included. */ - high = fls(ticks); /* 1..32 */ - width = HWEIGHT32(BASE_VALUE_FMASK); + high = fls(ticks); /* 1..32 (or warning above) */ + width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE)); scale = high > width ? 
high - width : 0; if (scale) { /* If we're scaling, round up to get a closer result */ @@ -838,8 +951,8 @@ static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds) scale++; } - val = u32_encode_bits(scale, SCALE_FMASK); - val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK); + val = ipa_reg_encode(reg, TIMER_SCALE, scale); + val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale); return val; } @@ -850,28 +963,47 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint, { u32 endpoint_id = endpoint->endpoint_id; struct ipa *ipa = endpoint->ipa; - u32 offset; + const struct ipa_reg *reg; u32 val; /* This should only be changed when HOL_BLOCK_EN is disabled */ - offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id); - val = hol_block_timer_val(ipa, microseconds); - iowrite32(val, ipa->reg_virt + offset); + reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER); + val = hol_block_timer_encode(ipa, reg, microseconds); + + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } static void -ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable) +ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable) { u32 endpoint_id = endpoint->endpoint_id; + struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; u32 offset; u32 val; - val = enable ? HOL_BLOCK_EN_FMASK : 0; - offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id); - iowrite32(val, endpoint->ipa->reg_virt + offset); + reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN); + offset = ipa_reg_n_offset(reg, endpoint_id); + val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0; + + iowrite32(val, ipa->reg_virt + offset); + /* When enabling, the register must be written twice for IPA v4.5+ */ - if (enable && endpoint->ipa->version >= IPA_VERSION_4_5) - iowrite32(val, endpoint->ipa->reg_virt + offset); + if (enable && ipa->version >= IPA_VERSION_4_5) + iowrite32(val, ipa->reg_virt + offset); +} + +/* Assumes HOL_BLOCK is in disabled state */ +static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, + u32 microseconds) +{ + ipa_endpoint_init_hol_block_timer(endpoint, microseconds); + ipa_endpoint_init_hol_block_en(endpoint, true); +} + +static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint) +{ + ipa_endpoint_init_hol_block_en(endpoint, false); } void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) @@ -884,54 +1016,65 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa) if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM) continue; - ipa_endpoint_init_hol_block_enable(endpoint, false); - ipa_endpoint_init_hol_block_timer(endpoint, 0); - ipa_endpoint_init_hol_block_enable(endpoint, true); + ipa_endpoint_init_hol_block_disable(endpoint); + ipa_endpoint_init_hol_block_enable(endpoint, 0); } } static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint) { - u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id); + u32 endpoint_id = endpoint->endpoint_id; + struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; u32 val = 0; if (!endpoint->toward_ipa) return; /* Register not valid for RX endpoints */ + reg = ipa_reg(ipa, ENDP_INIT_DEAGGR); /* DEAGGR_HDR_LEN is 0 */ /* PACKET_OFFSET_VALID is 0 */ /* PACKET_OFFSET_LOCATION is ignored (not valid) */ /* MAX_PACKET_LEN is 0 (not enforced) */ - iowrite32(val, endpoint->ipa->reg_virt + offset); + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } static void ipa_endpoint_init_rsrc_grp(struct 
ipa_endpoint *endpoint) { - u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id); + u32 resource_group = endpoint->config.resource_group; + u32 endpoint_id = endpoint->endpoint_id; struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; u32 val; - val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group); - iowrite32(val, ipa->reg_virt + offset); + reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP); + val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group); + + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) { - u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id); - u32 val = 0; + u32 endpoint_id = endpoint->endpoint_id; + struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; + u32 val; if (!endpoint->toward_ipa) return; /* Register not valid for RX endpoints */ + reg = ipa_reg(ipa, ENDP_INIT_SEQ); + /* Low-order byte configures primary packet processing */ - val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK); + val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type); - /* Second byte configures replicated packet processing */ - val |= u32_encode_bits(endpoint->data->tx.seq_rep_type, - SEQ_REP_TYPE_FMASK); + /* Second byte (if supported) configures replicated packet processing */ + if (ipa->version < IPA_VERSION_4_5) + val |= ipa_reg_encode(reg, SEQ_REP_TYPE, + endpoint->config.tx.seq_rep_type); - iowrite32(val, endpoint->ipa->reg_virt + offset); + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } /** @@ -952,7 +1095,7 @@ int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb) * If not, see if we can linearize it before giving up. */ nr_frags = skb_shinfo(skb)->nr_frags; - if (1 + nr_frags > endpoint->trans_tre_max) { + if (nr_frags > endpoint->skb_frag_max) { if (skb_linearize(skb)) return -E2BIG; nr_frags = 0; @@ -981,149 +1124,123 @@ static void ipa_endpoint_status(struct ipa_endpoint *endpoint) { u32 endpoint_id = endpoint->endpoint_id; struct ipa *ipa = endpoint->ipa; + const struct ipa_reg *reg; u32 val = 0; - u32 offset; - offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id); - - if (endpoint->data->status_enable) { - val |= STATUS_EN_FMASK; + reg = ipa_reg(ipa, ENDP_STATUS); + if (endpoint->config.status_enable) { + val |= ipa_reg_bit(reg, STATUS_EN); if (endpoint->toward_ipa) { enum ipa_endpoint_name name; u32 status_endpoint_id; - name = endpoint->data->tx.status_endpoint; + name = endpoint->config.tx.status_endpoint; status_endpoint_id = ipa->name_map[name]->endpoint_id; - val |= u32_encode_bits(status_endpoint_id, - STATUS_ENDP_FMASK); + val |= ipa_reg_encode(reg, STATUS_ENDP, + status_endpoint_id); } /* STATUS_LOCATION is 0, meaning status element precedes - * packet (not present for IPA v4.5) + * packet (not present for IPA v4.5+) */ - /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */ + /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */ } - iowrite32(val, ipa->reg_virt + offset); + iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); } -static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint) +static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint, + struct gsi_trans *trans) { - struct gsi_trans *trans; - bool doorbell = false; struct page *page; + u32 buffer_size; u32 offset; u32 len; int ret; - page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE)); + buffer_size = endpoint->config.rx.buffer_size; + page = 
dev_alloc_pages(get_order(buffer_size)); if (!page) return -ENOMEM; - trans = ipa_endpoint_trans_alloc(endpoint, 1); - if (!trans) - goto err_free_pages; - /* Offset the buffer to make space for skb headroom */ offset = NET_SKB_PAD; - len = IPA_RX_BUFFER_SIZE - offset; + len = buffer_size - offset; ret = gsi_trans_page_add(trans, page, len, offset); if (ret) - goto err_trans_free; - trans->data = page; /* transaction owns page now */ - - if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) { - doorbell = true; - endpoint->replenish_ready = 0; - } - - gsi_trans_commit(trans, doorbell); - - return 0; - -err_trans_free: - gsi_trans_free(trans); -err_free_pages: - __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); + put_page(page); + else + trans->data = page; /* transaction owns page now */ - return -ENOMEM; + return ret; } /** * ipa_endpoint_replenish() - Replenish endpoint receive buffers * @endpoint: Endpoint to be replenished - * @add_one: Whether this is replacing a just-consumed buffer * * The IPA hardware can hold a fixed number of receive buffers for an RX * endpoint, based on the number of entries in the underlying channel ring * buffer. If an endpoint's "backlog" is non-zero, it indicates how many * more receive buffers can be supplied to the hardware. Replenishing for - * an endpoint can be disabled, in which case requests to replenish a - * buffer are "saved", and transferred to the backlog once it is re-enabled - * again. + * an endpoint can be disabled, in which case buffers are not queued to + * the hardware. */ -static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one) +static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint) { - struct gsi *gsi; - u32 backlog; + struct gsi_trans *trans; - if (!endpoint->replenish_enabled) { - if (add_one) - atomic_inc(&endpoint->replenish_saved); + if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags)) return; - } - while (atomic_dec_not_zero(&endpoint->replenish_backlog)) - if (ipa_endpoint_replenish_one(endpoint)) + /* Skip it if it's already active */ + if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags)) + return; + + while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) { + bool doorbell; + + if (ipa_endpoint_replenish_one(endpoint, trans)) goto try_again_later; - if (add_one) - atomic_inc(&endpoint->replenish_backlog); + + + /* Ring the doorbell if we've got a full batch */ + doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH); + gsi_trans_commit(trans, doorbell); + } + + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); return; try_again_later: - /* The last one didn't succeed, so fix the backlog */ - backlog = atomic_inc_return(&endpoint->replenish_backlog); - - if (add_one) - atomic_inc(&endpoint->replenish_backlog); + gsi_trans_free(trans); + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); /* Whenever a receive buffer transaction completes we'll try to * replenish again. It's unlikely, but if we fail to supply even * one buffer, nothing will trigger another replenish attempt. - * Receive buffer transactions use one TRE, so schedule work to - * try replenishing again if our backlog is *all* available TREs. + * If the hardware has no receive buffers queued, schedule work to + * try replenishing again. 
*/ - gsi = &endpoint->ipa->gsi; - if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id)) + if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) schedule_delayed_work(&endpoint->replenish_work, msecs_to_jiffies(1)); } static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint) { - struct gsi *gsi = &endpoint->ipa->gsi; - u32 max_backlog; - u32 saved; - - endpoint->replenish_enabled = true; - while ((saved = atomic_xchg(&endpoint->replenish_saved, 0))) - atomic_add(saved, &endpoint->replenish_backlog); + set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); /* Start replenishing if hardware currently has no buffers */ - max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id); - if (atomic_read(&endpoint->replenish_backlog) == max_backlog) - ipa_endpoint_replenish(endpoint, false); + if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id)) + ipa_endpoint_replenish(endpoint); } static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint) { - u32 backlog; - - endpoint->replenish_enabled = false; - while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0))) - atomic_add(backlog, &endpoint->replenish_saved); + clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); } static void ipa_endpoint_replenish_work(struct work_struct *work) @@ -1133,7 +1250,7 @@ static void ipa_endpoint_replenish_work(struct work_struct *work) endpoint = container_of(dwork, struct ipa_endpoint, replenish_work); - ipa_endpoint_replenish(endpoint, false); + ipa_endpoint_replenish(endpoint); } static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, @@ -1141,32 +1258,33 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint, { struct sk_buff *skb; + if (!endpoint->netdev) + return; + skb = __dev_alloc_skb(len, GFP_ATOMIC); if (skb) { + /* Copy the data into the socket buffer and receive it */ skb_put(skb, len); memcpy(skb->data, data, len); skb->truesize += extra; } - /* Now receive it, or drop it if there's no netdev */ - if (endpoint->netdev) - ipa_modem_skb_rx(endpoint->netdev, skb); - else if (skb) - dev_kfree_skb_any(skb); + ipa_modem_skb_rx(endpoint->netdev, skb); } static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint, struct page *page, u32 len) { + u32 buffer_size = endpoint->config.rx.buffer_size; struct sk_buff *skb; /* Nothing to do if there's no netdev */ if (!endpoint->netdev) return false; - WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD)); + WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD)); - skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE); + skb = build_skb(page_address(page), buffer_size); if (skb) { /* Reserve the headroom and account for the data */ skb_reserve(skb, NET_SKB_PAD); @@ -1264,8 +1382,9 @@ static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint, static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, struct page *page, u32 total_len) { + u32 buffer_size = endpoint->config.rx.buffer_size; void *data = page_address(page) + NET_SKB_PAD; - u32 unused = IPA_RX_BUFFER_SIZE - total_len; + u32 unused = buffer_size - total_len; u32 resid = total_len; while (resid) { @@ -1293,10 +1412,10 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, * And if checksum offload is enabled a trailer containing * computed checksum information will be appended. */ - align = endpoint->data->rx.pad_align ? : 1; + align = endpoint->config.rx.pad_align ? 
: 1; len = le16_to_cpu(status->pkt_len); len = sizeof(*status) + ALIGN(len, align); - if (endpoint->data->checksum) + if (endpoint->config.checksum) len += sizeof(struct rmnet_map_dl_csum_trailer); if (!ipa_endpoint_status_drop(endpoint, status)) { @@ -1323,38 +1442,25 @@ static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint, } } -/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */ -static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint, - struct gsi_trans *trans) -{ -} - -/* Complete transaction initiated in ipa_endpoint_replenish_one() */ -static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint, - struct gsi_trans *trans) +void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint, + struct gsi_trans *trans) { struct page *page; - ipa_endpoint_replenish(endpoint, true); + if (endpoint->toward_ipa) + return; if (trans->cancelled) - return; + goto done; /* Parse or build a socket buffer using the actual received length */ page = trans->data; - if (endpoint->data->status_enable) + if (endpoint->config.status_enable) ipa_endpoint_status_parse(endpoint, page, trans->len); else if (ipa_endpoint_skb_build(endpoint, page, trans->len)) trans->data = NULL; /* Pages have been consumed */ -} - -void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint, - struct gsi_trans *trans) -{ - if (endpoint->toward_ipa) - ipa_endpoint_tx_complete(endpoint, trans); - else - ipa_endpoint_rx_complete(endpoint, trans); +done: + ipa_endpoint_replenish(endpoint); } void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, @@ -1374,22 +1480,24 @@ void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint, struct page *page = trans->data; if (page) - __free_pages(page, get_order(IPA_RX_BUFFER_SIZE)); + put_page(page); } } void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id) { + const struct ipa_reg *reg; u32 val; + reg = ipa_reg(ipa, ROUTE); /* ROUTE_DIS is 0 */ - val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK); - val |= ROUTE_DEF_HDR_TABLE_FMASK; - val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK); - val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK); - val |= ROUTE_DEF_RETAIN_HDR_FMASK; + val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id); + val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE); + /* ROUTE_DEF_HDR_OFST is 0 */ + val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id); + val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR); - iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET); + iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); } void ipa_endpoint_default_route_clear(struct ipa *ipa) @@ -1505,7 +1613,7 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) * All other cases just need to reset the underlying GSI channel. */ special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa && - endpoint->data->aggregation; + endpoint->config.aggregation; if (special && ipa_endpoint_aggr_active(endpoint)) ret = ipa_endpoint_reset_rx_aggr(endpoint); else @@ -1519,10 +1627,19 @@ static void ipa_endpoint_reset(struct ipa_endpoint *endpoint) static void ipa_endpoint_program(struct ipa_endpoint *endpoint) { - if (endpoint->toward_ipa) - ipa_endpoint_program_delay(endpoint, false); - else + if (endpoint->toward_ipa) { + /* Newer versions of IPA use GSI channel flow control + * instead of endpoint DELAY mode to prevent sending data. + * Flow control is disabled for newly-allocated channels, + * and we can assume flow control is not (ever) enabled + * for AP TX channels. 
+ */ + if (endpoint->ipa->version < IPA_VERSION_4_2) + ipa_endpoint_program_delay(endpoint, false); + } else { + /* Ensure suspend mode is off on all AP RX endpoints */ (void)ipa_endpoint_program_suspend(endpoint, false); + } ipa_endpoint_init_cfg(endpoint); ipa_endpoint_init_nat(endpoint); ipa_endpoint_init_hdr(endpoint); @@ -1530,6 +1647,12 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint) ipa_endpoint_init_hdr_metadata_mask(endpoint); ipa_endpoint_init_mode(endpoint); ipa_endpoint_init_aggr(endpoint); + if (!endpoint->toward_ipa) { + if (endpoint->config.rx.holb_drop) + ipa_endpoint_init_hol_block_enable(endpoint, 0); + else + ipa_endpoint_init_hol_block_disable(endpoint); + } ipa_endpoint_init_deaggr(endpoint); ipa_endpoint_init_rsrc_grp(endpoint); ipa_endpoint_init_seq(endpoint); @@ -1661,15 +1784,13 @@ static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint) if (endpoint->ee_id != GSI_EE_AP) return; - endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id); + endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1; if (!endpoint->toward_ipa) { /* RX transactions require a single TRE, so the maximum * backlog is the same as the maximum outstanding TREs. */ - endpoint->replenish_enabled = false; - atomic_set(&endpoint->replenish_saved, - gsi_channel_tre_max(gsi, endpoint->channel_id)); - atomic_set(&endpoint->replenish_backlog, 0); + clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags); + clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags); INIT_DELAYED_WORK(&endpoint->replenish_work, ipa_endpoint_replenish_work); } @@ -1720,6 +1841,7 @@ void ipa_endpoint_teardown(struct ipa *ipa) int ipa_endpoint_config(struct ipa *ipa) { struct device *dev = &ipa->pdev->dev; + const struct ipa_reg *reg; u32 initialized; u32 rx_base; u32 rx_mask; @@ -1746,11 +1868,12 @@ int ipa_endpoint_config(struct ipa *ipa) /* Find out about the endpoints supplied by the hardware, and ensure * the highest one doesn't exceed the number we support. 
*/ - val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET); + reg = ipa_reg(ipa, FLAVOR_0); + val = ioread32(ipa->reg_virt + ipa_reg_offset(reg)); /* Our RX is an IPA producer */ - rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK); - max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK); + rx_base = ipa_reg_decode(reg, PROD_LOWEST, val); + max = rx_base + ipa_reg_decode(reg, MAX_PROD_PIPES, val); if (max > IPA_ENDPOINT_MAX) { dev_err(dev, "too many endpoints (%u > %u)\n", max, IPA_ENDPOINT_MAX); @@ -1759,7 +1882,7 @@ int ipa_endpoint_config(struct ipa *ipa) rx_mask = GENMASK(max - 1, rx_base); /* Our TX is an IPA consumer */ - max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK); + max = ipa_reg_decode(reg, MAX_CONS_PIPES, val); tx_mask = GENMASK(max - 1, 0); ipa->available = rx_mask | tx_mask; @@ -1811,7 +1934,7 @@ static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name, endpoint->channel_id = data->channel_id; endpoint->endpoint_id = data->endpoint_id; endpoint->toward_ipa = data->toward_ipa; - endpoint->data = &data->endpoint.config; + endpoint->config = data->endpoint.config; ipa->initialized |= BIT(endpoint->endpoint_id); } @@ -1845,6 +1968,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count, enum ipa_endpoint_name name; u32 filter_map; + BUILD_BUG_ON(!IPA_REPLENISH_BATCH); + if (!ipa_endpoint_data_valid(ipa, count, data)) return 0; /* Error */ @@ -1859,6 +1984,8 @@ u32 ipa_endpoint_init(struct ipa *ipa, u32 count, if (data->endpoint.filter_support) filter_map |= BIT(data->endpoint_id); + if (data->ee_id == GSI_EE_MODEM && data->toward_ipa) + ipa->modem_tx_count++; } if (!ipa_filter_map_valid(ipa, filter_map)) |
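
Note: the patch derives the AGGR register's BYTE_LIMIT field from the new per-endpoint buffer_size in ipa_aggr_size_kb(), subtracting room for an MTU plus skb overhead only when a soft limit is used. The standalone sketch below mirrors that computation; the constant values (MTU, skb overhead, NET_SKB_PAD) are illustrative placeholders, not the kernel's definitions.

/*
 * Sketch of the soft/hard aggregation byte-limit computation added by
 * this patch (ipa_aggr_size_kb()).  Constants are placeholders chosen
 * only for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SZ_1K			1024u
#define NET_SKB_PAD		32u	/* assumed; arch-dependent in the kernel */
#define IPA_MTU			1500u	/* assumed MTU value */
#define IPA_RX_BUFFER_OVERHEAD	320u	/* assumed skb overhead, for illustration */

static uint32_t ipa_aggr_size_kb(uint32_t rx_buffer_size, bool aggr_hard_limit)
{
	/* With a soft limit, aggregation closes only after the limit is
	 * crossed, so the buffer must still have room for a full MTU of
	 * data plus overhead beyond the programmed limit.
	 */
	if (!aggr_hard_limit)
		rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;

	/* The BYTE_LIMIT field is programmed in kilobytes */
	return rx_buffer_size / SZ_1K;
}

int main(void)
{
	uint32_t buffer_size = 8192;	/* one possible per-endpoint buffer size */

	/* The driver passes buffer_size - NET_SKB_PAD, since the buffer
	 * is offset to leave skb headroom.
	 */
	printf("soft limit: %u KB\n",
	       ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, false));
	printf("hard limit: %u KB\n",
	       ipa_aggr_size_kb(buffer_size - NET_SKB_PAD, true));

	return 0;
}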
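Note: the patch factors the IPA v4.5+ Qtime selection into a shared ipa_qtime_val() helper used by both the aggregation and head-of-line blocking timer encoders. A minimal sketch follows; the 100 usec / 1 msec granularities come from the patch's comment, while the field width in main() is an arbitrary example.

/*
 * Sketch of ipa_qtime_val(): a positive return means pulse generator 0
 * (100 usec granularity) can represent the period; a negative return
 * carries the pulse generator 1 (1 msec granularity) value.
 */
#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d)	(((x) + (d) / 2) / (d))

static int ipa_qtime_val(uint32_t microseconds, uint32_t max)
{
	uint32_t val;

	/* Use 100 microsecond granularity if it fits the field */
	val = DIV_ROUND_CLOSEST(microseconds, 100);
	if (val <= max)
		return (int)val;

	/* Fall back to millisecond granularity (pulse generator 1) */
	val = DIV_ROUND_CLOSEST(microseconds, 1000);
	return -(int)val;
}

int main(void)
{
	uint32_t max = 31;	/* example 5-bit TIME_LIMIT field */

	printf("%d\n", ipa_qtime_val(500, max));	/* 5: generator 0 */
	printf("%d\n", ipa_qtime_val(10000, max));	/* -10: generator 1 */

	return 0;
}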
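Note: for IPA v4.2 the head-of-line blocking timeout register holds a base value and a power-of-two scale (roughly ticks = base << scale), where one tick is 128 IPA core-clock cycles. The sketch below shows that split; the clock rate and base-field width are assumptions, and the round-up step the kernel performs when scaling is omitted.

/*
 * Sketch of the IPA v4.2 HOL_BLOCK timer encoding: compute a tick count
 * with 64-bit arithmetic, then keep its most significant bits as the
 * base and record how far it was shifted as the scale.
 */
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC	1000000ull

/* Find last (most significant) set bit, 1-based; 0 means no bits set */
static unsigned int fls32(uint32_t v)
{
	unsigned int n = 0;

	while (v) {
		n++;
		v >>= 1;
	}
	return n;
}

int main(void)
{
	uint64_t rate = 100000000;	/* assumed IPA core clock rate, 100 MHz */
	uint64_t microseconds = 5000;	/* example timeout */
	unsigned int width = 5;		/* assumed TIMER_BASE_VALUE field width */
	unsigned int ticks, base, scale, high;

	/* One tick is 128 core-clock cycles; 64-bit math avoids overflow */
	ticks = (unsigned int)((microseconds * rate + (128 * USEC_PER_SEC) / 2) /
			       (128 * USEC_PER_SEC));

	/* Keep the most significant 'width' bits as the base value */
	high = fls32(ticks);
	scale = high > width ? high - width : 0;
	base = ticks >> scale;

	printf("ticks=%u -> base=%u scale=%u (~%u ticks)\n",
	       ticks, base, scale, base << scale);

	return 0;
}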
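Note: the replenish rework drops the atomic backlog/saved counters in favor of two flag bits, where ENABLED gates replenishing and ACTIVE guards against concurrent replenish attempts. The sketch below uses C11 atomics as a stand-in for the kernel's test_and_set_bit()/clear_bit(), and reduces the transaction-allocation loop to a stub; names are simplified.

/* Sketch of the ENABLED/ACTIVE flag protocol used by the new
 * ipa_endpoint_replenish() path.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { REPLENISH_ENABLED, REPLENISH_ACTIVE };

struct endpoint {
	atomic_uint flags;
};

static bool test_and_set_flag(struct endpoint *ep, unsigned int bit)
{
	return atomic_fetch_or(&ep->flags, 1u << bit) & (1u << bit);
}

static void clear_flag(struct endpoint *ep, unsigned int bit)
{
	atomic_fetch_and(&ep->flags, ~(1u << bit));
}

static bool flag_set(struct endpoint *ep, unsigned int bit)
{
	return atomic_load(&ep->flags) & (1u << bit);
}

/* Stand-in for allocating and committing one receive transaction */
static bool queue_one_buffer(struct endpoint *ep)
{
	static int budget = 3;	/* pretend the ring has three free slots */

	(void)ep;
	return budget-- > 0;
}

static void replenish(struct endpoint *ep)
{
	if (!flag_set(ep, REPLENISH_ENABLED))
		return;

	/* Skip it if a replenish is already in progress */
	if (test_and_set_flag(ep, REPLENISH_ACTIVE))
		return;

	while (queue_one_buffer(ep))
		printf("queued a receive buffer\n");

	clear_flag(ep, REPLENISH_ACTIVE);
}

int main(void)
{
	struct endpoint ep = { .flags = 0 };

	replenish(&ep);					/* disabled: does nothing */
	atomic_fetch_or(&ep.flags, 1u << REPLENISH_ENABLED);
	replenish(&ep);					/* queues until the stub runs out */

	return 0;
}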