author		Mika Westerberg <mika.westerberg@linux.intel.com>	2022-08-30 18:32:49 +0300
committer	David S. Miller <davem@davemloft.net>	2022-08-31 14:05:12 +0100
commit		8bdc25cf62c798a696f9acb725bee6e71054893d
tree		f5664b21dd5934bf8f21a703861bfc07380c2cdc
parent		thunderbolt: Add back Intel Falcon Ridge end-to-end flow control workaround
net: thunderbolt: Enable full end-to-end flow control
The USB4NET protocol allows the networking driver to take advantage of the end-to-end flow control supported by the USB4 host interface. This should prevent the receiving side from dropping network packets. In addition, add a module parameter that can be used to turn this off in case it causes problems.

Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/thunderbolt.c | 29 ++++++++++++++++++++---------
 1 file changed, 20 insertions(+), 9 deletions(-)
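
For reference, a minimal sketch (not part of the patch) of how a read-only boolean module parameter like "e2e" is typically declared and consulted; the names here only mirror the patch and the demo module itself is hypothetical:

/* Minimal, self-contained kernel module sketch illustrating the pattern. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_e2e = true;
/* Exposed as "e2e", read-only (0444) via /sys/module/<module>/parameters/e2e */
module_param_named(e2e, demo_e2e, bool, 0444);
MODULE_PARM_DESC(e2e, "enable full end-to-end flow control (default: true)");

static int __init demo_init(void)
{
	/* The driver would use demo_e2e to decide whether to set the E2E
	 * capability bit before allocating its rings / announcing properties. */
	pr_info("demo: full end-to-end flow control %s\n",
		demo_e2e ? "enabled" : "disabled");
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the parameter permissions are 0444, it can only be set at module load time, e.g. "modprobe thunderbolt-net e2e=0" (the exact module name may vary with kernel configuration).
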
diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c
index ab3f04562980..8e272d2a61e5 100644
--- a/drivers/net/thunderbolt.c
+++ b/drivers/net/thunderbolt.c
@@ -30,6 +30,7 @@
#define TBNET_RING_SIZE 256
#define TBNET_LOGIN_RETRIES 60
#define TBNET_LOGOUT_RETRIES 10
+#define TBNET_E2E BIT(0)
#define TBNET_MATCH_FRAGS_ID BIT(1)
#define TBNET_64K_FRAMES BIT(2)
#define TBNET_MAX_MTU SZ_64K
@@ -209,6 +210,10 @@ static const uuid_t tbnet_svc_uuid =
static struct tb_property_dir *tbnet_dir;
+static bool tbnet_e2e = true;
+module_param_named(e2e, tbnet_e2e, bool, 0444);
+MODULE_PARM_DESC(e2e, "USB4NET full end-to-end flow control (default: true)");
+
static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route,
u8 sequence, const uuid_t *initiator_uuid, const uuid_t *target_uuid,
enum thunderbolt_ip_type type, size_t size, u32 command_id)
@@ -873,6 +878,7 @@ static int tbnet_open(struct net_device *dev)
struct tb_xdomain *xd = net->xd;
u16 sof_mask, eof_mask;
struct tb_ring *ring;
+ unsigned int flags;
int hopid;
netif_carrier_off(dev);
@@ -897,9 +903,14 @@ static int tbnet_open(struct net_device *dev)
sof_mask = BIT(TBIP_PDF_FRAME_START);
eof_mask = BIT(TBIP_PDF_FRAME_END);
- ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE,
- RING_FLAG_FRAME, 0, sof_mask, eof_mask,
- tbnet_start_poll, net);
+ flags = RING_FLAG_FRAME;
+ /* Only enable full E2E if the other end supports it too */
+ if (tbnet_e2e && net->svc->prtcstns & TBNET_E2E)
+ flags |= RING_FLAG_E2E;
+
+ ring = tb_ring_alloc_rx(xd->tb->nhi, -1, TBNET_RING_SIZE, flags,
+ net->tx_ring.ring->hop, sof_mask,
+ eof_mask, tbnet_start_poll, net);
if (!ring) {
netdev_err(dev, "failed to allocate Rx ring\n");
tb_ring_free(net->tx_ring.ring);
@@ -1362,6 +1373,7 @@ static struct tb_service_driver tbnet_driver = {
static int __init tbnet_init(void)
{
+ unsigned int flags;
int ret;
tbnet_dir = tb_property_create_dir(&tbnet_dir_uuid);
@@ -1371,12 +1383,11 @@ static int __init tbnet_init(void)
tb_property_add_immediate(tbnet_dir, "prtcid", 1);
tb_property_add_immediate(tbnet_dir, "prtcvers", 1);
tb_property_add_immediate(tbnet_dir, "prtcrevs", 1);
- /* Currently only announce support for match frags ID (bit 1). Bit 0
- * is reserved for full E2E flow control which we do not support at
- * the moment.
- */
- tb_property_add_immediate(tbnet_dir, "prtcstns",
- TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES);
+
+ flags = TBNET_MATCH_FRAGS_ID | TBNET_64K_FRAMES;
+ if (tbnet_e2e)
+ flags |= TBNET_E2E;
+ tb_property_add_immediate(tbnet_dir, "prtcstns", flags);
ret = tb_register_property_dir("network", tbnet_dir);
if (ret) {