Diffstat (limited to 'drivers/thunderbolt')
34 files changed, 15666 insertions, 1767 deletions
diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig index 1eb757e8df3b..448fd2ec8f6e 100644 --- a/drivers/thunderbolt/Kconfig +++ b/drivers/thunderbolt/Kconfig @@ -2,17 +2,57 @@ menuconfig USB4 tristate "Unified support for USB4 and Thunderbolt" depends on PCI - depends on X86 || COMPILE_TEST select APPLE_PROPERTIES if EFI_STUB && X86 select CRC32 select CRYPTO select CRYPTO_HASH select NVMEM help - USB4 and Thunderbolt driver. USB4 is the public speficiation - based on Thunderbolt 3 protocol. This driver is required if + USB4 and Thunderbolt driver. USB4 is the public specification + based on the Thunderbolt 3 protocol. This driver is required if you want to hotplug Thunderbolt and USB4 compliant devices on Apple hardware or on PCs with Intel Falcon Ridge or newer. To compile this driver a module, choose M here. The module will be called thunderbolt. + +if USB4 + +config USB4_DEBUGFS_WRITE + bool "Enable write by debugfs to configuration spaces (DANGEROUS)" + help + Enables writing to device configuration registers through + debugfs interface. + + Only enable this if you know what you are doing! Never enable + this for production systems or distro kernels. + +config USB4_DEBUGFS_MARGINING + bool "Expose receiver lane margining operations under USB4 ports (DANGEROUS)" + depends on DEBUG_FS + depends on USB4_DEBUGFS_WRITE + help + Enables hardware and software based receiver lane margining support + under each USB4 port. Used for electrical quality and robustness + validation during manufacturing. Should not be enabled by distro + kernels. + +config USB4_KUNIT_TEST + bool "KUnit tests" if !KUNIT_ALL_TESTS + depends on USB4 && KUNIT=y + default KUNIT_ALL_TESTS + +config USB4_DMA_TEST + tristate "DMA traffic test driver" + depends on DEBUG_FS + help + This allows sending and receiving DMA traffic through loopback + connection. Loopback connection can be done by either special + dongle that has TX/RX lines crossed, or by simply connecting a + cable back to the host. Only enable this if you know what you + are doing. Normal users and distro kernels should say N here. + + To compile this driver a module, choose M here. The module will be + called thunderbolt_dma_test. 
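[Aside: the new symbols are pure compile-time gates. A minimal sketch of the pattern this series uses (debugfs.c below keys its file permissions off CONFIG_USB4_DEBUGFS_WRITE via DEBUGFS_MODE); the helper name here is invented for illustration:

#include <linux/kconfig.h>
#include <linux/types.h>

/* Illustrative helper, not part of the patch: register files become
 * writable only when USB4_DEBUGFS_WRITE=y, mirroring DEBUGFS_MODE in
 * debugfs.c further down.
 */
static umode_t tb_regs_file_mode(void)
{
	if (IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE))
		return 0600;
	return 0400;
}
]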
+ +endif # USB4 diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile index eae28dd45250..78fd365893c1 100644 --- a/drivers/thunderbolt/Makefile +++ b/drivers/thunderbolt/Makefile @@ -2,3 +2,12 @@ obj-${CONFIG_USB4} := thunderbolt.o thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o +thunderbolt-objs += usb4_port.o nvm.o retimer.o quirks.o + +thunderbolt-${CONFIG_ACPI} += acpi.o +thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o +thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o +CFLAGS_test.o += $(DISABLE_STRUCTLEAK_PLUGIN) + +thunderbolt_dma_test-${CONFIG_USB4_DMA_TEST} += dma_test.o +obj-$(CONFIG_USB4_DMA_TEST) += thunderbolt_dma_test.o diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c new file mode 100644 index 000000000000..7a8adf5ad5a0 --- /dev/null +++ b/drivers/thunderbolt/acpi.c @@ -0,0 +1,386 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI support + * + * Copyright (C) 2020, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/acpi.h> +#include <linux/pm_runtime.h> + +#include "tb.h" + +static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, + void **return_value) +{ + struct acpi_device *adev = acpi_fetch_acpi_dev(handle); + struct fwnode_reference_args args; + struct fwnode_handle *fwnode; + struct tb_nhi *nhi = data; + struct pci_dev *pdev; + struct device *dev; + int ret; + + if (!adev) + return AE_OK; + + fwnode = acpi_fwnode_handle(adev); + ret = fwnode_property_get_reference_args(fwnode, "usb4-host-interface", + NULL, 0, 0, &args); + if (ret) + return AE_OK; + + /* It needs to reference this NHI */ + if (dev_fwnode(&nhi->pdev->dev) != args.fwnode) + goto out_put; + + /* + * Try to find physical device walking upwards to the hierarcy. + * We need to do this because the xHCI driver might not yet be + * bound so the USB3 SuperSpeed ports are not yet created. + */ + dev = acpi_get_first_physical_node(adev); + while (!dev) { + adev = acpi_dev_parent(adev); + if (!adev) + break; + dev = acpi_get_first_physical_node(adev); + } + + if (!dev) + goto out_put; + + /* + * Check that the device is PCIe. This is because USB3 + * SuperSpeed ports have this property and they are not power + * managed with the xHCI and the SuperSpeed hub so we create the + * link from xHCI instead. + */ + while (dev && !dev_is_pci(dev)) + dev = dev->parent; + + if (!dev) + goto out_put; + + /* + * Check that this actually matches the type of device we + * expect. It should either be xHCI or PCIe root/downstream + * port. + */ + pdev = to_pci_dev(dev); + if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI || + (pci_is_pcie(pdev) && + (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) { + const struct device_link *link; + + /* + * Make them both active first to make sure the NHI does + * not runtime suspend before the consumer. The + * pm_runtime_put() below then allows the consumer to + * runtime suspend again (which then allows NHI runtime + * suspend too now that the device link is established). 
+ */ + pm_runtime_get_sync(&pdev->dev); + + link = device_link_add(&pdev->dev, &nhi->pdev->dev, + DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_RPM_ACTIVE | + DL_FLAG_PM_RUNTIME); + if (link) { + dev_dbg(&nhi->pdev->dev, "created link from %s\n", + dev_name(&pdev->dev)); + } else { + dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", + dev_name(&pdev->dev)); + } + + pm_runtime_put(&pdev->dev); + } + +out_put: + fwnode_handle_put(args.fwnode); + return AE_OK; +} + +/** + * tb_acpi_add_links() - Add device links based on ACPI description + * @nhi: Pointer to NHI + * + * Goes over ACPI namespace finding tunneled ports that reference to + * @nhi ACPI node. For each reference a device link is added. The link + * is automatically removed by the driver core. + */ +void tb_acpi_add_links(struct tb_nhi *nhi) +{ + acpi_status status; + + if (!has_acpi_companion(&nhi->pdev->dev)) + return; + + /* + * Find all devices that have usb4-host-controller interface + * property that references to this NHI. + */ + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32, + tb_acpi_add_link, NULL, nhi, NULL); + if (ACPI_FAILURE(status)) + dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n"); +} + +/** + * tb_acpi_is_native() - Did the platform grant native TBT/USB4 control + * + * Returns %true if the platform granted OS native control over + * TBT/USB4. In this case software based connection manager can be used, + * otherwise there is firmware based connection manager running. + */ +bool tb_acpi_is_native(void) +{ + return osc_sb_native_usb4_support_confirmed && + osc_sb_native_usb4_control; +} + +/** + * tb_acpi_may_tunnel_usb3() - Is USB3 tunneling allowed by the platform + * + * When software based connection manager is used, this function + * returns %true if platform allows native USB3 tunneling. + */ +bool tb_acpi_may_tunnel_usb3(void) +{ + if (tb_acpi_is_native()) + return osc_sb_native_usb4_control & OSC_USB_USB3_TUNNELING; + return true; +} + +/** + * tb_acpi_may_tunnel_dp() - Is DisplayPort tunneling allowed by the platform + * + * When software based connection manager is used, this function + * returns %true if platform allows native DP tunneling. + */ +bool tb_acpi_may_tunnel_dp(void) +{ + if (tb_acpi_is_native()) + return osc_sb_native_usb4_control & OSC_USB_DP_TUNNELING; + return true; +} + +/** + * tb_acpi_may_tunnel_pcie() - Is PCIe tunneling allowed by the platform + * + * When software based connection manager is used, this function + * returns %true if platform allows native PCIe tunneling. + */ +bool tb_acpi_may_tunnel_pcie(void) +{ + if (tb_acpi_is_native()) + return osc_sb_native_usb4_control & OSC_USB_PCIE_TUNNELING; + return true; +} + +/** + * tb_acpi_is_xdomain_allowed() - Are XDomain connections allowed + * + * When software based connection manager is used, this function + * returns %true if platform allows XDomain connections. 
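[Aside: the tb_acpi_may_tunnel_*() helpers encode the _OSC negotiation result: under a firmware connection manager they always return true, otherwise the corresponding OSC_USB_*_TUNNELING bit decides. A minimal sketch of how a software connection manager might consult them; the surrounding function is invented, while tb_tunnel_alloc_pci() is assumed to be the driver's existing tunnel allocator from tunnel.c:

/* Illustrative only: gate PCIe tunnel creation on platform policy */
static struct tb_tunnel *try_pcie_tunnel(struct tb *tb, struct tb_port *up,
					 struct tb_port *down)
{
	if (!tb_acpi_may_tunnel_pcie())
		return NULL;	/* platform retained native PCIe control */
	return tb_tunnel_alloc_pci(tb, up, down);
}
]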
+ */ +bool tb_acpi_is_xdomain_allowed(void) +{ + if (tb_acpi_is_native()) + return osc_sb_native_usb4_control & OSC_USB_XDOMAIN; + return true; +} + +/* UUID for retimer _DSM: e0053122-795b-4122-8a5e-57be1d26acb3 */ +static const guid_t retimer_dsm_guid = + GUID_INIT(0xe0053122, 0x795b, 0x4122, + 0x8a, 0x5e, 0x57, 0xbe, 0x1d, 0x26, 0xac, 0xb3); + +#define RETIMER_DSM_QUERY_ONLINE_STATE 1 +#define RETIMER_DSM_SET_ONLINE_STATE 2 + +static int tb_acpi_retimer_set_power(struct tb_port *port, bool power) +{ + struct usb4_port *usb4 = port->usb4; + union acpi_object argv4[2]; + struct acpi_device *adev; + union acpi_object *obj; + int ret; + + if (!usb4->can_offline) + return 0; + + adev = ACPI_COMPANION(&usb4->dev); + if (WARN_ON(!adev)) + return 0; + + /* Check if we are already powered on (and in correct mode) */ + obj = acpi_evaluate_dsm_typed(adev->handle, &retimer_dsm_guid, 1, + RETIMER_DSM_QUERY_ONLINE_STATE, NULL, + ACPI_TYPE_INTEGER); + if (!obj) { + tb_port_warn(port, "ACPI: query online _DSM failed\n"); + return -EIO; + } + + ret = obj->integer.value; + ACPI_FREE(obj); + + if (power == ret) + return 0; + + tb_port_dbg(port, "ACPI: calling _DSM to power %s retimers\n", + power ? "on" : "off"); + + argv4[0].type = ACPI_TYPE_PACKAGE; + argv4[0].package.count = 1; + argv4[0].package.elements = &argv4[1]; + argv4[1].integer.type = ACPI_TYPE_INTEGER; + argv4[1].integer.value = power; + + obj = acpi_evaluate_dsm_typed(adev->handle, &retimer_dsm_guid, 1, + RETIMER_DSM_SET_ONLINE_STATE, argv4, + ACPI_TYPE_INTEGER); + if (!obj) { + tb_port_warn(port, + "ACPI: set online state _DSM evaluation failed\n"); + return -EIO; + } + + ret = obj->integer.value; + ACPI_FREE(obj); + + if (ret >= 0) { + if (power) + return ret == 1 ? 0 : -EBUSY; + return 0; + } + + tb_port_warn(port, "ACPI: set online state _DSM failed with error %d\n", ret); + return -EIO; +} + +/** + * tb_acpi_power_on_retimers() - Call platform to power on retimers + * @port: USB4 port + * + * Calls platform to turn on power to all retimers behind this USB4 + * port. After this function returns successfully the caller can + * continue with the normal retimer flows (as specified in the USB4 + * spec). Note if this returns %-EBUSY it means the type-C port is in + * non-USB4/TBT mode (there is non-USB4/TBT device connected). + * + * This should only be called if the USB4/TBT link is not up. + * + * Returns %0 on success. + */ +int tb_acpi_power_on_retimers(struct tb_port *port) +{ + return tb_acpi_retimer_set_power(port, true); +} + +/** + * tb_acpi_power_off_retimers() - Call platform to power off retimers + * @port: USB4 port + * + * This is the opposite of tb_acpi_power_on_retimers(). After returning + * successfully the normal operations with the @port can continue. + * + * Returns %0 on success. + */ +int tb_acpi_power_off_retimers(struct tb_port *port) +{ + return tb_acpi_retimer_set_power(port, false); +} + +static bool tb_acpi_bus_match(struct device *dev) +{ + return tb_is_switch(dev) || tb_is_usb4_port_device(dev); +} + +static struct acpi_device *tb_acpi_switch_find_companion(struct tb_switch *sw) +{ + struct acpi_device *adev = NULL; + struct tb_switch *parent_sw; + + /* + * Device routers exists under the downstream facing USB4 port + * of the parent router. Their _ADR is always 0. 
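[Aside: taken together, the retimer _DSM pair gives the USB4 port code a bracketed power sequence around offline retimer access. A sketch of the intended call pattern; the wrapper below is illustrative, not from the patch:

/* Hypothetical caller: have the platform power the retimers while the
 * link is down, do the access, then power them back off.
 */
static int retimer_access_offline(struct tb_port *port)
{
	int ret;

	ret = tb_acpi_power_on_retimers(port);
	if (ret)
		return ret;	/* -EBUSY: type-C port is in non-USB4/TBT mode */

	/* ... enumerate/flash retimers per the USB4 spec here ... */

	return tb_acpi_power_off_retimers(port);
}
]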
+ */ + parent_sw = tb_switch_parent(sw); + if (parent_sw) { + struct tb_port *port = tb_port_at(tb_route(sw), parent_sw); + struct acpi_device *port_adev; + + port_adev = acpi_find_child_by_adr(ACPI_COMPANION(&parent_sw->dev), + port->port); + if (port_adev) + adev = acpi_find_child_device(port_adev, 0, false); + } else { + struct tb_nhi *nhi = sw->tb->nhi; + struct acpi_device *parent_adev; + + parent_adev = ACPI_COMPANION(&nhi->pdev->dev); + if (parent_adev) + adev = acpi_find_child_device(parent_adev, 0, false); + } + + return adev; +} + +static struct acpi_device *tb_acpi_find_companion(struct device *dev) +{ + /* + * The Thunderbolt/USB4 hierarchy looks like following: + * + * Device (NHI) + * Device (HR) // Host router _ADR == 0 + * Device (DFP0) // Downstream port _ADR == lane 0 adapter + * Device (DR) // Device router _ADR == 0 + * Device (UFP) // Upstream port _ADR == lane 0 adapter + * Device (DFP1) // Downstream port _ADR == lane 0 adapter number + * + * At the moment we bind the host router to the corresponding + * Linux device. + */ + if (tb_is_switch(dev)) + return tb_acpi_switch_find_companion(tb_to_switch(dev)); + else if (tb_is_usb4_port_device(dev)) + return acpi_find_child_by_adr(ACPI_COMPANION(dev->parent), + tb_to_usb4_port_device(dev)->port->port); + return NULL; +} + +static void tb_acpi_setup(struct device *dev) +{ + struct acpi_device *adev = ACPI_COMPANION(dev); + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + + if (!adev || !usb4) + return; + + if (acpi_check_dsm(adev->handle, &retimer_dsm_guid, 1, + BIT(RETIMER_DSM_QUERY_ONLINE_STATE) | + BIT(RETIMER_DSM_SET_ONLINE_STATE))) + usb4->can_offline = true; +} + +static struct acpi_bus_type tb_acpi_bus = { + .name = "thunderbolt", + .match = tb_acpi_bus_match, + .find_companion = tb_acpi_find_companion, + .setup = tb_acpi_setup, +}; + +int tb_acpi_init(void) +{ + return register_acpi_bus_type(&tb_acpi_bus); +} + +void tb_acpi_exit(void) +{ + unregister_acpi_bus_type(&tb_acpi_bus); +} diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c index 19db6cdc5b70..8ecd610c62d5 100644 --- a/drivers/thunderbolt/cap.c +++ b/drivers/thunderbolt/cap.c @@ -15,14 +15,6 @@ #define VSE_CAP_OFFSET_MAX 0xffff #define TMU_ACCESS_EN BIT(20) -struct tb_cap_any { - union { - struct tb_cap_basic basic; - struct tb_cap_extended_short extended_short; - struct tb_cap_extended_long extended_long; - }; -} __packed; - static int tb_port_enable_tmu(struct tb_port *port, bool enable) { struct tb_switch *sw = port->sw; @@ -67,23 +59,50 @@ static void tb_port_dummy_read(struct tb_port *port) } } +/** + * tb_port_next_cap() - Return next capability in the linked list + * @port: Port to find the capability for + * @offset: Previous capability offset (%0 for start) + * + * Returns dword offset of the next capability in port config space + * capability list and returns it. Passing %0 returns the first entry in + * the capability list. If no next capability is found returns %0. In case + * of failure returns negative errno. 
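[Aside: tb_port_next_cap() below reduces capability discovery to a simple cursor walk; the debugfs code later in this patch iterates exactly like this:

/* Walk every capability in a port's config space; cap is a dword
 * offset, 0 terminates, negative values are errors (pattern taken
 * from port_caps_show() in debugfs.c below).
 */
static void walk_port_caps(struct tb_port *port)
{
	int cap = tb_port_next_cap(port, 0);

	while (cap > 0)
		cap = tb_port_next_cap(port, cap);
}
]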
+ */ +int tb_port_next_cap(struct tb_port *port, unsigned int offset) +{ + struct tb_cap_any header; + int ret; + + if (!offset) + return port->config.first_cap_offset; + + ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1); + if (ret) + return ret; + + return header.basic.next; +} + static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) { - u32 offset = 1; + int offset = 0; do { struct tb_cap_any header; int ret; + offset = tb_port_next_cap(port, offset); + if (offset < 0) + return offset; + ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1); if (ret) return ret; if (header.basic.cap == cap) return offset; - - offset = header.basic.next; - } while (offset); + } while (offset > 0); return -ENOENT; } @@ -114,8 +133,52 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) } /** + * tb_switch_next_cap() - Return next capability in the linked list + * @sw: Switch to find the capability for + * @offset: Previous capability offset (%0 for start) + * + * Finds dword offset of the next capability in router config space + * capability list and returns it. Passing %0 returns the first entry in + * the capability list. If no next capability is found returns %0. In case + * of failure returns negative errno. + */ +int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset) +{ + struct tb_cap_any header; + int ret; + + if (!offset) + return sw->config.first_cap_offset; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2); + if (ret) + return ret; + + switch (header.basic.cap) { + case TB_SWITCH_CAP_TMU: + ret = header.basic.next; + break; + + case TB_SWITCH_CAP_VSE: + if (!header.extended_short.length) + ret = header.extended_long.next; + else + ret = header.extended_short.next; + break; + + default: + tb_sw_dbg(sw, "unknown capability %#x at %#x\n", + header.basic.cap, offset); + ret = -EINVAL; + break; + } + + return ret >= VSE_CAP_OFFSET_MAX ? 0 : ret; +} + +/** * tb_switch_find_cap() - Find switch capability - * @sw Switch to find the capability for + * @sw: Switch to find the capability for * @cap: Capability to look * * Returns offset to start of capability or %-ENOENT if no such @@ -124,21 +187,23 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) */ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) { - int offset = sw->config.first_cap_offset; + int offset = 0; - while (offset > 0 && offset < CAP_OFFSET_MAX) { + do { struct tb_cap_any header; int ret; + offset = tb_switch_next_cap(sw, offset); + if (offset < 0) + return offset; + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1); if (ret) return ret; if (header.basic.cap == cap) return offset; - - offset = header.basic.next; - } + } while (offset); return -ENOENT; } @@ -155,37 +220,24 @@ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) */ int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec) { - struct tb_cap_any header; - int offset; + int offset = 0; - offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE); - if (offset < 0) - return offset; - - while (offset > 0 && offset < VSE_CAP_OFFSET_MAX) { + do { + struct tb_cap_any header; int ret; - ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2); + offset = tb_switch_next_cap(sw, offset); + if (offset < 0) + return offset; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1); if (ret) return ret; - /* - * Extended vendor specific capabilities come in two - * flavors: short and long. The latter is used when - * offset is over 0xff. 
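[Aside: the short/long distinction the removed comment describes still exists, just centralized in tb_switch_next_cap(): a zero length field in the short header means the long form is in use. A decoding sketch, assuming struct tb_cap_any now lives in the driver's register definitions:

/* Next-pointer selection for extended (VSE) capability headers */
static int vse_next(const struct tb_cap_any *header)
{
	if (header->extended_short.length)
		return header->extended_short.next;	/* short form */
	return header->extended_long.next;		/* long form */
}
]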
- */ - if (offset >= CAP_OFFSET_MAX) { - if (header.extended_long.vsec_id == vsec) - return offset; - offset = header.extended_long.next; - } else { - if (header.extended_short.vsec_id == vsec) - return offset; - if (!header.extended_short.length) - return -ENOENT; - offset = header.extended_short.next; - } - } + if (header.extended_short.cap == TB_SWITCH_CAP_VSE && + header.extended_short.vsec_id == vsec) + return offset; + } while (offset); return -ENOENT; } diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index f77ceae5c7d7..0c661a706160 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -20,7 +20,18 @@ #define TB_CTL_RETRIES 4 /** - * struct tb_cfg - thunderbolt control channel + * struct tb_ctl - Thunderbolt control channel + * @nhi: Pointer to the NHI structure + * @tx: Transmit ring + * @rx: Receive ring + * @frame_pool: DMA pool for control messages + * @rx_packets: Received control messages + * @request_queue_lock: Lock protecting @request_queue + * @request_queue: List of outstanding requests + * @running: Is the control channel running at the moment + * @timeout_msec: Default timeout for non-raw control messages + * @callback: Callback called when hotplug message is received + * @callback_data: Data passed to @callback */ struct tb_ctl { struct tb_nhi *nhi; @@ -33,6 +44,7 @@ struct tb_ctl { struct list_head request_queue; bool running; + int timeout_msec; event_cb callback; void *callback_data; }; @@ -146,21 +158,20 @@ static bool tb_cfg_request_is_active(struct tb_cfg_request *req) static struct tb_cfg_request * tb_cfg_request_find(struct tb_ctl *ctl, struct ctl_pkg *pkg) { - struct tb_cfg_request *req; - bool found = false; + struct tb_cfg_request *req = NULL, *iter; mutex_lock(&pkg->ctl->request_queue_lock); - list_for_each_entry(req, &pkg->ctl->request_queue, list) { - tb_cfg_request_get(req); - if (req->match(req, pkg)) { - found = true; + list_for_each_entry(iter, &pkg->ctl->request_queue, list) { + tb_cfg_request_get(iter); + if (iter->match(iter, pkg)) { + req = iter; break; } - tb_cfg_request_put(req); + tb_cfg_request_put(iter); } mutex_unlock(&pkg->ctl->request_queue_lock); - return found ? req : NULL; + return req; } /* utility functions */ @@ -219,6 +230,7 @@ static int check_config_address(struct tb_cfg_address addr, static struct tb_cfg_result decode_error(const struct ctl_pkg *response) { struct cfg_error_pkg *pkg = response->buffer; + struct tb_ctl *ctl = response->ctl; struct tb_cfg_result res = { 0 }; res.response_route = tb_cfg_get_route(&pkg->header); res.response_port = 0; @@ -227,9 +239,13 @@ static struct tb_cfg_result decode_error(const struct ctl_pkg *response) if (res.err) return res; - WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1); - WARN(pkg->zero2, "pkg->zero1 is %#x\n", pkg->zero1); - WARN(pkg->zero3, "pkg->zero1 is %#x\n", pkg->zero1); + if (pkg->zero1) + tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1); + if (pkg->zero2) + tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2); + if (pkg->zero3) + tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3); + res.err = 1; res.tb_error = pkg->error; res.response_port = pkg->port; @@ -266,9 +282,8 @@ static void tb_cfg_print_error(struct tb_ctl *ctl, * Invalid cfg_space/offset/length combination in * cfg_read/cfg_write. 
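[Aside: throughout ctl.c, struct tb_cfg_result uses a three-way err convention. A sketch of how callers consume it; the helper is illustrative, the real translation lives in tb_cfg_get_error() further down:

/* err == 0: success; err == 1: the router sent an error packet and
 * res->tb_error is valid; err < 0: local -errno from the transport.
 */
static int check_result(const struct tb_cfg_result *res)
{
	if (res->err == 1)
		return -EIO;	/* refine via res->tb_error */
	return res->err;
}
]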
*/ - tb_ctl_WARN(ctl, - "CFG_ERROR(%llx:%x): Invalid config space or offset\n", - res->response_route, res->response_port); + tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n", + res->response_route, res->response_port); return; case TB_CFG_ERROR_NO_SUCH_PORT: /* @@ -283,6 +298,10 @@ static void tb_cfg_print_error(struct tb_ctl *ctl, tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n", res->response_route, res->response_port); return; + case TB_CFG_ERROR_LOCK: + tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n", + res->response_route, res->response_port); + return; default: /* 5,6,7,9 and 11 are also valid error codes */ tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n", @@ -330,7 +349,7 @@ static void tb_ctl_tx_callback(struct tb_ring *ring, struct ring_frame *frame, tb_ctl_pkg_free(pkg); } -/** +/* * tb_cfg_tx() - transmit a packet on the control channel * * len must be a multiple of four. @@ -367,7 +386,7 @@ static int tb_ctl_tx(struct tb_ctl *ctl, const void *data, size_t len, return res; } -/** +/* * tb_ctl_handle_event() - acknowledge a plug event, invoke ctl->callback */ static bool tb_ctl_handle_event(struct tb_ctl *ctl, enum tb_cfg_pkg_type type, @@ -388,7 +407,7 @@ static void tb_ctl_rx_submit(struct ctl_pkg *pkg) static int tb_async_error(const struct ctl_pkg *pkg) { - const struct cfg_error_pkg *error = (const struct cfg_error_pkg *)pkg; + const struct cfg_error_pkg *error = pkg->buffer; if (pkg->frame.eof != TB_CFG_PKG_ERROR) return false; @@ -453,7 +472,7 @@ static void tb_ctl_rx_callback(struct tb_ring *ring, struct ring_frame *frame, "RX: checksum mismatch, dropping packet\n"); goto rx; } - /* Fall through */ + fallthrough; case TB_CFG_PKG_ICM_EVENT: if (tb_ctl_handle_event(pkg->ctl, frame->eof, pkg, frame->size)) goto rx; @@ -594,18 +613,24 @@ struct tb_cfg_result tb_cfg_request_sync(struct tb_ctl *ctl, /** * tb_ctl_alloc() - allocate a control channel + * @nhi: Pointer to NHI + * @timeout_msec: Default timeout used with non-raw control messages + * @cb: Callback called for plug events + * @cb_data: Data passed to @cb * * cb will be invoked once for every hot plug event. * * Return: Returns a pointer on success or NULL on failure. */ -struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data) +struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb, + void *cb_data) { int i; struct tb_ctl *ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); if (!ctl) return NULL; ctl->nhi = nhi; + ctl->timeout_msec = timeout_msec; ctl->callback = cb; ctl->callback_data = cb_data; @@ -620,8 +645,8 @@ struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data) if (!ctl->tx) goto err; - ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0xffff, - 0xffff, NULL, NULL); + ctl->rx = tb_ring_alloc_rx(nhi, 0, 10, RING_FLAG_NO_SUSPEND, 0, 0xffff, + 0xffff, NULL, NULL); if (!ctl->rx) goto err; @@ -641,6 +666,7 @@ err: /** * tb_ctl_free() - free a control channel + * @ctl: Control channel to free * * Must be called after tb_ctl_stop. 
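[Aside: with timeout_msec now stored in the channel, callers pick the default once at allocation time. Usage sketch; the 5000 ms value just mirrors the removed TB_CFG_DEFAULT_TIMEOUT and the callback name is illustrative:

static int setup_ctl(struct tb *tb, struct tb_nhi *nhi)
{
	/* 5000 ms matches the old default; event_callback stands in
	 * for whatever the domain code registers.
	 */
	tb->ctl = tb_ctl_alloc(nhi, 5000, event_callback, tb);
	if (!tb->ctl)
		return -ENOMEM;
	return 0;
}
]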
* @@ -668,7 +694,8 @@ void tb_ctl_free(struct tb_ctl *ctl) } /** - * tb_cfg_start() - start/resume the control channel + * tb_ctl_start() - start/resume the control channel + * @ctl: Control channel to start */ void tb_ctl_start(struct tb_ctl *ctl) { @@ -683,7 +710,8 @@ void tb_ctl_start(struct tb_ctl *ctl) } /** - * control() - pause the control channel + * tb_ctl_stop() - pause the control channel + * @ctl: Control channel to stop * * All invocations of ctl->callback will have finished after this method * returns. @@ -776,13 +804,14 @@ static bool tb_cfg_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg) /** * tb_cfg_reset() - send a reset packet and wait for a response + * @ctl: Control channel pointer + * @route: Router string for the router to send reset * * If the switch at route is incorrectly configured then we will not receive a * reply (even though the switch will reset). The caller should check for * -ETIMEDOUT and attempt to reconfigure the switch. */ -struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, - int timeout_msec) +struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route) { struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) }; struct tb_cfg_result res = { 0 }; @@ -804,7 +833,7 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, req->response_size = sizeof(reply); req->response_type = TB_CFG_PKG_RESET; - res = tb_cfg_request_sync(ctl, req, timeout_msec); + res = tb_cfg_request_sync(ctl, req, ctl->timeout_msec); tb_cfg_request_put(req); @@ -812,9 +841,17 @@ struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, } /** - * tb_cfg_read() - read from config space into buffer + * tb_cfg_read_raw() - read from config space into buffer + * @ctl: Pointer to the control channel + * @buffer: Buffer where the data is read + * @route: Route string of the router + * @port: Port number when reading from %TB_CFG_PORT, %0 otherwise + * @space: Config space selector + * @offset: Dword word offset of the register to start reading + * @length: Number of dwords to read + * @timeout_msec: Timeout in ms how long to wait for the response * - * Offset and length are in dwords. + * Reads from router config space without translating the possible error. */ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, enum tb_cfg_space space, @@ -875,9 +912,17 @@ struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, } /** - * tb_cfg_write() - write from buffer into config space + * tb_cfg_write_raw() - write from buffer into config space + * @ctl: Pointer to the control channel + * @buffer: Data to write + * @route: Route string of the router + * @port: Port number when writing to %TB_CFG_PORT, %0 otherwise + * @space: Config space selector + * @offset: Dword word offset of the register to start writing + * @length: Number of dwords to write + * @timeout_msec: Timeout in ms how long to wait for the response * - * Offset and length are in dwords. + * Writes to router config space without translating the possible error. 
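[Aside: the _raw variants keep an explicit timeout parameter and return the untranslated result, while plain tb_cfg_read()/tb_cfg_write() now fall back to ctl->timeout_msec and translate errors. Sketch of the split; the 100 ms timeout is invented:

static int read_router_dword(struct tb_ctl *ctl, u64 route, u32 *data)
{
	/* Raw: caller chooses the timeout and interprets the result */
	struct tb_cfg_result res = tb_cfg_read_raw(ctl, data, route, 0,
						   TB_CFG_SWITCH, 0, 1, 100);

	if (res.err)
		return -EIO;	/* res.tb_error holds the detail */
	return 0;
}
]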
*/ struct tb_cfg_result tb_cfg_write_raw(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, enum tb_cfg_space space, @@ -951,6 +996,12 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space, return -ENODEV; tb_cfg_print_error(ctl, res); + + if (res->tb_error == TB_CFG_ERROR_LOCK) + return -EACCES; + else if (res->tb_error == TB_CFG_ERROR_PORT_NOT_CONNECTED) + return -ENOTCONN; + return -EIO; } @@ -958,7 +1009,7 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, enum tb_cfg_space space, u32 offset, u32 length) { struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port, - space, offset, length, TB_CFG_DEFAULT_TIMEOUT); + space, offset, length, ctl->timeout_msec); switch (res.err) { case 0: /* Success */ @@ -984,7 +1035,7 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, enum tb_cfg_space space, u32 offset, u32 length) { struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port, - space, offset, length, TB_CFG_DEFAULT_TIMEOUT); + space, offset, length, ctl->timeout_msec); switch (res.err) { case 0: /* Success */ @@ -1008,6 +1059,8 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, /** * tb_cfg_get_upstream_port() - get upstream port number of switch at route + * @ctl: Pointer to the control channel + * @route: Route string of the router * * Reads the first dword from the switches TB_CFG_SWITCH config area and * returns the port number from which the reply originated. @@ -1020,7 +1073,7 @@ int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route) u32 dummy; struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0, TB_CFG_SWITCH, 0, 1, - TB_CFG_DEFAULT_TIMEOUT); + ctl->timeout_msec); if (res.err == 1) return -EIO; if (res.err) diff --git a/drivers/thunderbolt/ctl.h b/drivers/thunderbolt/ctl.h index 97cb03b38953..7c7d80f96c0c 100644 --- a/drivers/thunderbolt/ctl.h +++ b/drivers/thunderbolt/ctl.h @@ -21,22 +21,21 @@ struct tb_ctl; typedef bool (*event_cb)(void *data, enum tb_cfg_pkg_type type, const void *buf, size_t size); -struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, event_cb cb, void *cb_data); +struct tb_ctl *tb_ctl_alloc(struct tb_nhi *nhi, int timeout_msec, event_cb cb, + void *cb_data); void tb_ctl_start(struct tb_ctl *ctl); void tb_ctl_stop(struct tb_ctl *ctl); void tb_ctl_free(struct tb_ctl *ctl); /* configuration commands */ -#define TB_CFG_DEFAULT_TIMEOUT 5000 /* msec */ - struct tb_cfg_result { u64 response_route; u32 response_port; /* * If err = 1 then this is the port that send the * error. * If err = 0 and if this was a cfg_read/write then - * this is the the upstream port of the responding + * this is the upstream port of the responding * switch. * Otherwise the field is set to zero. 
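[Aside: the new mapping in tb_cfg_get_error() lets callers tell a locked downstream port apart from an empty one. A sketch of a caller reacting to the distinct codes; the handling shown is illustrative:

static void report_read_result(struct tb_switch *sw, int ret)
{
	switch (ret) {
	case -EACCES:	/* TB_CFG_ERROR_LOCK: downstream port locked */
		tb_sw_dbg(sw, "port is locked\n");
		break;
	case -ENOTCONN:	/* TB_CFG_ERROR_PORT_NOT_CONNECTED */
		tb_sw_dbg(sw, "nothing connected\n");
		break;
	}
}
]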
*/ @@ -124,8 +123,7 @@ static inline struct tb_cfg_header tb_cfg_make_header(u64 route) } int tb_cfg_ack_plug(struct tb_ctl *ctl, u64 route, u32 port, bool unplug); -struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, - int timeout_msec); +struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route); struct tb_cfg_result tb_cfg_read_raw(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, enum tb_cfg_space space, u32 offset, diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c new file mode 100644 index 000000000000..834bcad42e9f --- /dev/null +++ b/drivers/thunderbolt/debugfs.c @@ -0,0 +1,1573 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Debugfs interface + * + * Copyright (C) 2020, Intel Corporation + * Authors: Gil Fine <gil.fine@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> +#include <linux/uaccess.h> + +#include "tb.h" +#include "sb_regs.h" + +#define PORT_CAP_PCIE_LEN 1 +#define PORT_CAP_POWER_LEN 2 +#define PORT_CAP_LANE_LEN 3 +#define PORT_CAP_USB3_LEN 5 +#define PORT_CAP_DP_LEN 8 +#define PORT_CAP_TMU_LEN 8 +#define PORT_CAP_BASIC_LEN 9 +#define PORT_CAP_USB4_LEN 20 + +#define SWITCH_CAP_TMU_LEN 26 +#define SWITCH_CAP_BASIC_LEN 27 + +#define PATH_LEN 2 + +#define COUNTER_SET_LEN 3 + +#define DEBUGFS_ATTR(__space, __write) \ +static int __space ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __space ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __space ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __space ## _open, \ + .release = single_release, \ + .read = seq_read, \ + .write = __write, \ + .llseek = seq_lseek, \ +} + +#define DEBUGFS_ATTR_RO(__space) \ + DEBUGFS_ATTR(__space, NULL) + +#define DEBUGFS_ATTR_RW(__space) \ + DEBUGFS_ATTR(__space, __space ## _write) + +static struct dentry *tb_debugfs_root; + +static void *validate_and_copy_from_user(const void __user *user_buf, + size_t *count) +{ + size_t nbytes; + void *buf; + + if (!*count) + return ERR_PTR(-EINVAL); + + if (!access_ok(user_buf, *count)) + return ERR_PTR(-EFAULT); + + buf = (void *)get_zeroed_page(GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + nbytes = min_t(size_t, *count, PAGE_SIZE); + if (copy_from_user(buf, user_buf, nbytes)) { + free_page((unsigned long)buf); + return ERR_PTR(-EFAULT); + } + + *count = nbytes; + return buf; +} + +static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len, + int long_fmt_len) +{ + char *token; + u32 v[5]; + int ret; + + token = strsep(line, "\n"); + if (!token) + return false; + + /* + * For Adapter/Router configuration space: + * Short format is: offset value\n + * v[0] v[1] + * Long format as produced from the read side: + * offset relative_offset cap_id vs_cap_id value\n + * v[0] v[1] v[2] v[3] v[4] + * + * For Counter configuration space: + * Short format is: offset\n + * v[0] + * Long format as produced from the read side: + * offset relative_offset counter_id value\n + * v[0] v[1] v[2] v[3] + */ + ret = sscanf(token, "%i %i %i %i %i", &v[0], &v[1], &v[2], &v[3], &v[4]); + /* In case of Counters, clear counter, "val" content is NA */ + if (ret == short_fmt_len) { + *offs = v[0]; + *val = v[short_fmt_len - 1]; + return true; + } else if (ret == long_fmt_len) { + *offs = v[0]; + *val = v[long_fmt_len - 1]; + return true; + } + + return false; +} + +#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE) +static ssize_t regs_write(struct tb_switch *sw, struct tb_port 
*port, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct tb *tb = sw->tb; + char *line, *buf; + u32 val, offset; + int ret = 0; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + + /* User did hardware changes behind the driver's back */ + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + + line = buf; + while (parse_line(&line, &offset, &val, 2, 5)) { + if (port) + ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1); + else + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + break; + } + + mutex_unlock(&tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + free_page((unsigned long)buf); + + return ret < 0 ? ret : count; +} + +static ssize_t port_regs_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + + return regs_write(port->sw, port, user_buf, count, ppos); +} + +static ssize_t switch_regs_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_switch *sw = s->private; + + return regs_write(sw, NULL, user_buf, count, ppos); +} +#define DEBUGFS_MODE 0600 +#else +#define port_regs_write NULL +#define switch_regs_write NULL +#define DEBUGFS_MODE 0400 +#endif + +#if IS_ENABLED(CONFIG_USB4_DEBUGFS_MARGINING) +/** + * struct tb_margining - Lane margining support + * @caps: Port lane margining capabilities + * @results: Last lane margining results + * @lanes: %0, %1 or %7 (all) + * @min_ber_level: Minimum supported BER level contour value + * @max_ber_level: Maximum supported BER level contour value + * @ber_level: Current BER level contour value + * @voltage_steps: Number of mandatory voltage steps + * @max_voltage_offset: Maximum mandatory voltage offset (in mV) + * @time_steps: Number of time margin steps + * @max_time_offset: Maximum time margin offset (in mUI) + * @software: %true if software margining is used instead of hardware + * @time: %true if time margining is used instead of voltage + * @right_high: %false if left/low margin test is performed, %true if + * right/high + */ +struct tb_margining { + u32 caps[2]; + u32 results[2]; + unsigned int lanes; + unsigned int min_ber_level; + unsigned int max_ber_level; + unsigned int ber_level; + unsigned int voltage_steps; + unsigned int max_voltage_offset; + unsigned int time_steps; + unsigned int max_time_offset; + bool software; + bool time; + bool right_high; +}; + +static bool supports_software(const struct usb4_port *usb4) +{ + return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_SW; +} + +static bool supports_hardware(const struct usb4_port *usb4) +{ + return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_MODES_HW; +} + +static bool both_lanes(const struct usb4_port *usb4) +{ + return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_2_LANES; +} + +static unsigned int independent_voltage_margins(const struct usb4_port *usb4) +{ + return (usb4->margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK) >> + USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT; +} + +static bool supports_time(const struct usb4_port *usb4) +{ + return usb4->margining->caps[0] & USB4_MARGIN_CAP_0_TIME; +} + +/* Only applicable if supports_time() returns true */ +static unsigned int independent_time_margins(const struct 
usb4_port *usb4) +{ + return (usb4->margining->caps[1] & USB4_MARGIN_CAP_1_TIME_INDP_MASK) >> + USB4_MARGIN_CAP_1_TIME_INDP_SHIFT; +} + +static ssize_t +margining_ber_level_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + unsigned int val; + int ret = 0; + char *buf; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (usb4->margining->software) { + ret = -EINVAL; + goto out_unlock; + } + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out_unlock; + } + + buf[count - 1] = '\0'; + + ret = kstrtouint(buf, 10, &val); + if (ret) + goto out_free; + + if (val < usb4->margining->min_ber_level || + val > usb4->margining->max_ber_level) { + ret = -EINVAL; + goto out_free; + } + + usb4->margining->ber_level = val; + +out_free: + free_page((unsigned long)buf); +out_unlock: + mutex_unlock(&tb->lock); + + return ret < 0 ? ret : count; +} + +static void ber_level_show(struct seq_file *s, unsigned int val) +{ + if (val % 2) + seq_printf(s, "3 * 1e%d (%u)\n", -12 + (val + 1) / 2, val); + else + seq_printf(s, "1e%d (%u)\n", -12 + val / 2, val); +} + +static int margining_ber_level_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + + if (usb4->margining->software) + return -EINVAL; + ber_level_show(s, usb4->margining->ber_level); + return 0; +} +DEBUGFS_ATTR_RW(margining_ber_level); + +static int margining_caps_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + u32 cap0, cap1; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + /* Dump the raw caps first */ + cap0 = usb4->margining->caps[0]; + seq_printf(s, "0x%08x\n", cap0); + cap1 = usb4->margining->caps[1]; + seq_printf(s, "0x%08x\n", cap1); + + seq_printf(s, "# software margining: %s\n", + supports_software(usb4) ? "yes" : "no"); + if (supports_hardware(usb4)) { + seq_puts(s, "# hardware margining: yes\n"); + seq_puts(s, "# minimum BER level contour: "); + ber_level_show(s, usb4->margining->min_ber_level); + seq_puts(s, "# maximum BER level contour: "); + ber_level_show(s, usb4->margining->max_ber_level); + } else { + seq_puts(s, "# hardware margining: no\n"); + } + + seq_printf(s, "# both lanes simultaneously: %s\n", + both_lanes(usb4) ? "yes" : "no"); + seq_printf(s, "# voltage margin steps: %u\n", + usb4->margining->voltage_steps); + seq_printf(s, "# maximum voltage offset: %u mV\n", + usb4->margining->max_voltage_offset); + + switch (independent_voltage_margins(usb4)) { + case USB4_MARGIN_CAP_0_VOLTAGE_MIN: + seq_puts(s, "# returns minimum between high and low voltage margins\n"); + break; + case USB4_MARGIN_CAP_0_VOLTAGE_HL: + seq_puts(s, "# returns high or low voltage margin\n"); + break; + case USB4_MARGIN_CAP_0_VOLTAGE_BOTH: + seq_puts(s, "# returns both high and low margins\n"); + break; + } + + if (supports_time(usb4)) { + seq_puts(s, "# time margining: yes\n"); + seq_printf(s, "# time margining is destructive: %s\n", + cap1 & USB4_MARGIN_CAP_1_TIME_DESTR ? 
"yes" : "no"); + + switch (independent_time_margins(usb4)) { + case USB4_MARGIN_CAP_1_TIME_MIN: + seq_puts(s, "# returns minimum between left and right time margins\n"); + break; + case USB4_MARGIN_CAP_1_TIME_LR: + seq_puts(s, "# returns left or right margin\n"); + break; + case USB4_MARGIN_CAP_1_TIME_BOTH: + seq_puts(s, "# returns both left and right margins\n"); + break; + } + + seq_printf(s, "# time margin steps: %u\n", + usb4->margining->time_steps); + seq_printf(s, "# maximum time offset: %u mUI\n", + usb4->margining->max_time_offset); + } else { + seq_puts(s, "# time margining: no\n"); + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RO(margining_caps); + +static ssize_t +margining_lanes_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + int ret = 0; + char *buf; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + buf[count - 1] = '\0'; + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_free; + } + + if (!strcmp(buf, "0")) { + usb4->margining->lanes = 0; + } else if (!strcmp(buf, "1")) { + usb4->margining->lanes = 1; + } else if (!strcmp(buf, "all")) { + /* Needs to be supported */ + if (both_lanes(usb4)) + usb4->margining->lanes = 7; + else + ret = -EINVAL; + } else { + ret = -EINVAL; + } + + mutex_unlock(&tb->lock); + +out_free: + free_page((unsigned long)buf); + return ret < 0 ? ret : count; +} + +static int margining_lanes_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + unsigned int lanes; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + lanes = usb4->margining->lanes; + if (both_lanes(usb4)) { + if (!lanes) + seq_puts(s, "[0] 1 all\n"); + else if (lanes == 1) + seq_puts(s, "0 [1] all\n"); + else + seq_puts(s, "0 1 [all]\n"); + } else { + if (!lanes) + seq_puts(s, "[0] 1\n"); + else + seq_puts(s, "0 [1]\n"); + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RW(margining_lanes); + +static ssize_t margining_mode_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + int ret = 0; + char *buf; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + buf[count - 1] = '\0'; + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_free; + } + + if (!strcmp(buf, "software")) { + if (supports_software(usb4)) + usb4->margining->software = true; + else + ret = -EINVAL; + } else if (!strcmp(buf, "hardware")) { + if (supports_hardware(usb4)) + usb4->margining->software = false; + else + ret = -EINVAL; + } else { + ret = -EINVAL; + } + + mutex_unlock(&tb->lock); + +out_free: + free_page((unsigned long)buf); + return ret ? 
ret : count; +} + +static int margining_mode_show(struct seq_file *s, void *not_used) +{ + const struct tb_port *port = s->private; + const struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + const char *space = ""; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (supports_software(usb4)) { + if (usb4->margining->software) + seq_puts(s, "[software]"); + else + seq_puts(s, "software"); + space = " "; + } + if (supports_hardware(usb4)) { + if (usb4->margining->software) + seq_printf(s, "%shardware", space); + else + seq_printf(s, "%s[hardware]", space); + } + + mutex_unlock(&tb->lock); + + seq_puts(s, "\n"); + return 0; +} +DEBUGFS_ATTR_RW(margining_mode); + +static int margining_run_write(void *data, u64 val) +{ + struct tb_port *port = data; + struct usb4_port *usb4 = port->usb4; + struct tb_switch *sw = port->sw; + struct tb_margining *margining; + struct tb *tb = sw->tb; + int ret; + + if (val != 1) + return -EINVAL; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + /* + * CL states may interfere with lane margining so inform the user know + * and bail out. + */ + if (tb_port_is_clx_enabled(port, TB_CL1 | TB_CL2)) { + tb_port_warn(port, + "CL states are enabled, Disable them with clx=0 and re-connect\n"); + ret = -EINVAL; + goto out_unlock; + } + + margining = usb4->margining; + + if (margining->software) { + tb_port_dbg(port, "running software %s lane margining for lanes %u\n", + margining->time ? "time" : "voltage", margining->lanes); + ret = usb4_port_sw_margin(port, margining->lanes, margining->time, + margining->right_high, + USB4_MARGIN_SW_COUNTER_CLEAR); + if (ret) + goto out_unlock; + + ret = usb4_port_sw_margin_errors(port, &margining->results[0]); + } else { + tb_port_dbg(port, "running hardware %s lane margining for lanes %u\n", + margining->time ? 
"time" : "voltage", margining->lanes); + /* Clear the results */ + margining->results[0] = 0; + margining->results[1] = 0; + ret = usb4_port_hw_margin(port, margining->lanes, + margining->ber_level, margining->time, + margining->right_high, margining->results); + } + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEFINE_DEBUGFS_ATTRIBUTE(margining_run_fops, NULL, margining_run_write, + "%llu\n"); + +static ssize_t margining_results_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + /* Just clear the results */ + usb4->margining->results[0] = 0; + usb4->margining->results[1] = 0; + + mutex_unlock(&tb->lock); + return count; +} + +static void voltage_margin_show(struct seq_file *s, + const struct tb_margining *margining, u8 val) +{ + unsigned int tmp, voltage; + + tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK; + voltage = tmp * margining->max_voltage_offset / margining->voltage_steps; + seq_printf(s, "%u mV (%u)", voltage, tmp); + if (val & USB4_MARGIN_HW_RES_1_EXCEEDS) + seq_puts(s, " exceeds maximum"); + seq_puts(s, "\n"); +} + +static void time_margin_show(struct seq_file *s, + const struct tb_margining *margining, u8 val) +{ + unsigned int tmp, interval; + + tmp = val & USB4_MARGIN_HW_RES_1_MARGIN_MASK; + interval = tmp * margining->max_time_offset / margining->time_steps; + seq_printf(s, "%u mUI (%u)", interval, tmp); + if (val & USB4_MARGIN_HW_RES_1_EXCEEDS) + seq_puts(s, " exceeds maximum"); + seq_puts(s, "\n"); +} + +static int margining_results_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb_margining *margining; + struct tb *tb = port->sw->tb; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + margining = usb4->margining; + /* Dump the raw results first */ + seq_printf(s, "0x%08x\n", margining->results[0]); + /* Only the hardware margining has two result dwords */ + if (!margining->software) { + unsigned int val; + + seq_printf(s, "0x%08x\n", margining->results[1]); + + if (margining->time) { + if (!margining->lanes || margining->lanes == 7) { + val = margining->results[1]; + seq_puts(s, "# lane 0 right time margin: "); + time_margin_show(s, margining, val); + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT; + seq_puts(s, "# lane 0 left time margin: "); + time_margin_show(s, margining, val); + } + if (margining->lanes == 1 || margining->lanes == 7) { + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT; + seq_puts(s, "# lane 1 right time margin: "); + time_margin_show(s, margining, val); + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT; + seq_puts(s, "# lane 1 left time margin: "); + time_margin_show(s, margining, val); + } + } else { + if (!margining->lanes || margining->lanes == 7) { + val = margining->results[1]; + seq_puts(s, "# lane 0 high voltage margin: "); + voltage_margin_show(s, margining, val); + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT; + seq_puts(s, "# lane 0 low voltage margin: "); + voltage_margin_show(s, margining, val); + } + if (margining->lanes == 1 || margining->lanes == 7) { + val = 
margining->results[1] >> + USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT; + seq_puts(s, "# lane 1 high voltage margin: "); + voltage_margin_show(s, margining, val); + val = margining->results[1] >> + USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT; + seq_puts(s, "# lane 1 low voltage margin: "); + voltage_margin_show(s, margining, val); + } + } + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RW(margining_results); + +static ssize_t margining_test_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + int ret = 0; + char *buf; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + buf[count - 1] = '\0'; + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_free; + } + + if (!strcmp(buf, "time") && supports_time(usb4)) + usb4->margining->time = true; + else if (!strcmp(buf, "voltage")) + usb4->margining->time = false; + else + ret = -EINVAL; + + mutex_unlock(&tb->lock); + +out_free: + free_page((unsigned long)buf); + return ret ? ret : count; +} + +static int margining_test_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (supports_time(usb4)) { + if (usb4->margining->time) + seq_puts(s, "voltage [time]\n"); + else + seq_puts(s, "[voltage] time\n"); + } else { + seq_puts(s, "[voltage]\n"); + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RW(margining_test); + +static ssize_t margining_margin_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + int ret = 0; + char *buf; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + buf[count - 1] = '\0'; + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_free; + } + + if (usb4->margining->time) { + if (!strcmp(buf, "left")) + usb4->margining->right_high = false; + else if (!strcmp(buf, "right")) + usb4->margining->right_high = true; + else + ret = -EINVAL; + } else { + if (!strcmp(buf, "low")) + usb4->margining->right_high = false; + else if (!strcmp(buf, "high")) + usb4->margining->right_high = true; + else + ret = -EINVAL; + } + + mutex_unlock(&tb->lock); + +out_free: + free_page((unsigned long)buf); + return ret ? 
ret : count; +} + +static int margining_margin_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct usb4_port *usb4 = port->usb4; + struct tb *tb = port->sw->tb; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (usb4->margining->time) { + if (usb4->margining->right_high) + seq_puts(s, "left [right]\n"); + else + seq_puts(s, "[left] right\n"); + } else { + if (usb4->margining->right_high) + seq_puts(s, "low [high]\n"); + else + seq_puts(s, "[low] high\n"); + } + + mutex_unlock(&tb->lock); + return 0; +} +DEBUGFS_ATTR_RW(margining_margin); + +static void margining_port_init(struct tb_port *port) +{ + struct tb_margining *margining; + struct dentry *dir, *parent; + struct usb4_port *usb4; + char dir_name[10]; + unsigned int val; + int ret; + + usb4 = port->usb4; + if (!usb4) + return; + + snprintf(dir_name, sizeof(dir_name), "port%d", port->port); + parent = debugfs_lookup(dir_name, port->sw->debugfs_dir); + + margining = kzalloc(sizeof(*margining), GFP_KERNEL); + if (!margining) + return; + + ret = usb4_port_margining_caps(port, margining->caps); + if (ret) { + kfree(margining); + return; + } + + usb4->margining = margining; + + /* Set the initial mode */ + if (supports_software(usb4)) + margining->software = true; + + val = (margining->caps[0] & USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK) >> + USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT; + margining->voltage_steps = val; + val = (margining->caps[0] & USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK) >> + USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT; + margining->max_voltage_offset = 74 + val * 2; + + if (supports_time(usb4)) { + val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_STEPS_MASK) >> + USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT; + margining->time_steps = val; + val = (margining->caps[1] & USB4_MARGIN_CAP_1_TIME_OFFSET_MASK) >> + USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT; + /* + * Store it as mUI (milli Unit Interval) because we want + * to keep it as integer. 
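[Aside: for reference, a worked decode of these capability fields, with invented field values:

/* Example:
 *   max-voltage-offset field = 13 -> 74 + 13 * 2  = 100 mV
 *   time-offset field        = 3  -> 200 + 10 * 3 = 230 mUI (just below)
 */
]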
+ */ + margining->max_time_offset = 200 + 10 * val; + } + + dir = debugfs_create_dir("margining", parent); + if (supports_hardware(usb4)) { + val = (margining->caps[1] & USB4_MARGIN_CAP_1_MIN_BER_MASK) >> + USB4_MARGIN_CAP_1_MIN_BER_SHIFT; + margining->min_ber_level = val; + val = (margining->caps[1] & USB4_MARGIN_CAP_1_MAX_BER_MASK) >> + USB4_MARGIN_CAP_1_MAX_BER_SHIFT; + margining->max_ber_level = val; + + /* Set the default to minimum */ + margining->ber_level = margining->min_ber_level; + + debugfs_create_file("ber_level_contour", 0400, dir, port, + &margining_ber_level_fops); + } + debugfs_create_file("caps", 0400, dir, port, &margining_caps_fops); + debugfs_create_file("lanes", 0600, dir, port, &margining_lanes_fops); + debugfs_create_file("mode", 0600, dir, port, &margining_mode_fops); + debugfs_create_file("run", 0600, dir, port, &margining_run_fops); + debugfs_create_file("results", 0600, dir, port, &margining_results_fops); + debugfs_create_file("test", 0600, dir, port, &margining_test_fops); + if (independent_voltage_margins(usb4) || + (supports_time(usb4) && independent_time_margins(usb4))) + debugfs_create_file("margin", 0600, dir, port, &margining_margin_fops); +} + +static void margining_port_remove(struct tb_port *port) +{ + struct dentry *parent; + char dir_name[10]; + + if (!port->usb4) + return; + + snprintf(dir_name, sizeof(dir_name), "port%d", port->port); + parent = debugfs_lookup(dir_name, port->sw->debugfs_dir); + debugfs_remove_recursive(debugfs_lookup("margining", parent)); + + kfree(port->usb4->margining); + port->usb4->margining = NULL; +} + +static void margining_switch_init(struct tb_switch *sw) +{ + struct tb_port *upstream, *downstream; + struct tb_switch *parent_sw; + u64 route = tb_route(sw); + + if (!route) + return; + + upstream = tb_upstream_port(sw); + parent_sw = tb_switch_parent(sw); + downstream = tb_port_at(route, parent_sw); + + margining_port_init(downstream); + margining_port_init(upstream); +} + +static void margining_switch_remove(struct tb_switch *sw) +{ + struct tb_switch *parent_sw; + struct tb_port *downstream; + u64 route = tb_route(sw); + + if (!route) + return; + + /* + * Upstream is removed with the router itself but we need to + * remove the downstream port margining directory. 
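[Aside: the end result, assuming the driver's debugfs root is the usual thunderbolt directory (the router and port names below are invented), is one margining subdirectory per cabled USB4 port:

/sys/kernel/debug/thunderbolt/0-1/port1/margining/
    ber_level_contour  caps  lanes  margin  mode  results  run  test

margin and ber_level_contour only appear when the port reports independent margins and hardware margining support, respectively.]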
+ */ + parent_sw = tb_switch_parent(sw); + downstream = tb_port_at(route, parent_sw); + margining_port_remove(downstream); +} + +static void margining_xdomain_init(struct tb_xdomain *xd) +{ + struct tb_switch *parent_sw; + struct tb_port *downstream; + + parent_sw = tb_xdomain_parent(xd); + downstream = tb_port_at(xd->route, parent_sw); + + margining_port_init(downstream); +} + +static void margining_xdomain_remove(struct tb_xdomain *xd) +{ + struct tb_switch *parent_sw; + struct tb_port *downstream; + + parent_sw = tb_xdomain_parent(xd); + downstream = tb_port_at(xd->route, parent_sw); + margining_port_remove(downstream); +} +#else +static inline void margining_switch_init(struct tb_switch *sw) { } +static inline void margining_switch_remove(struct tb_switch *sw) { } +static inline void margining_xdomain_init(struct tb_xdomain *xd) { } +static inline void margining_xdomain_remove(struct tb_xdomain *xd) { } +#endif + +static int port_clear_all_counters(struct tb_port *port) +{ + u32 *buf; + int ret; + + buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32), + GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0, + COUNTER_SET_LEN * port->config.max_counters); + kfree(buf); + + return ret; +} + +static ssize_t counters_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = port->sw->tb; + char *buf; + int ret; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + + /* If written delimiter only, clear all counters in one shot */ + if (buf[0] == '\n') { + ret = port_clear_all_counters(port); + } else { + char *line = buf; + u32 val, offset; + + ret = -EINVAL; + while (parse_line(&line, &offset, &val, 1, 4)) { + ret = tb_port_write(port, &val, TB_CFG_COUNTERS, + offset, 1); + if (ret) + break; + } + } + + mutex_unlock(&tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + free_page((unsigned long)buf); + + return ret < 0 ? 
ret : count; +} + +static void cap_show_by_dw(struct seq_file *s, struct tb_switch *sw, + struct tb_port *port, unsigned int cap, + unsigned int offset, u8 cap_id, u8 vsec_id, + int dwords) +{ + int i, ret; + u32 data; + + for (i = 0; i < dwords; i++) { + if (port) + ret = tb_port_read(port, &data, TB_CFG_PORT, cap + offset + i, 1); + else + ret = tb_sw_read(sw, &data, TB_CFG_SWITCH, cap + offset + i, 1); + if (ret) { + seq_printf(s, "0x%04x <not accessible>\n", cap + offset + i); + continue; + } + + seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", cap + offset + i, + offset + i, cap_id, vsec_id, data); + } +} + +static void cap_show(struct seq_file *s, struct tb_switch *sw, + struct tb_port *port, unsigned int cap, u8 cap_id, + u8 vsec_id, int length) +{ + int ret, offset = 0; + + while (length > 0) { + int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH); + u32 data[TB_MAX_CONFIG_RW_LENGTH]; + + if (port) + ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset, + dwords); + else + ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords); + if (ret) { + cap_show_by_dw(s, sw, port, cap, offset, cap_id, vsec_id, length); + return; + } + + for (i = 0; i < dwords; i++) { + seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", + cap + offset + i, offset + i, + cap_id, vsec_id, data[i]); + } + + length -= dwords; + offset += dwords; + } +} + +static void port_cap_show(struct tb_port *port, struct seq_file *s, + unsigned int cap) +{ + struct tb_cap_any header; + u8 vsec_id = 0; + size_t length; + int ret; + + ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", cap); + return; + } + + switch (header.basic.cap) { + case TB_PORT_CAP_PHY: + length = PORT_CAP_LANE_LEN; + break; + + case TB_PORT_CAP_TIME1: + length = PORT_CAP_TMU_LEN; + break; + + case TB_PORT_CAP_POWER: + length = PORT_CAP_POWER_LEN; + break; + + case TB_PORT_CAP_ADAP: + if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) { + length = PORT_CAP_PCIE_LEN; + } else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) { + length = PORT_CAP_DP_LEN; + } else if (tb_port_is_usb3_down(port) || + tb_port_is_usb3_up(port)) { + length = PORT_CAP_USB3_LEN; + } else { + seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n", + cap, header.basic.cap); + return; + } + break; + + case TB_PORT_CAP_VSE: + if (!header.extended_short.length) { + ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT, + cap + 1, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", + cap + 1); + return; + } + length = header.extended_long.length; + vsec_id = header.extended_short.vsec_id; + } else { + length = header.extended_short.length; + vsec_id = header.extended_short.vsec_id; + } + break; + + case TB_PORT_CAP_USB4: + length = PORT_CAP_USB4_LEN; + break; + + default: + seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n", + cap, header.basic.cap); + return; + } + + cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length); +} + +static void port_caps_show(struct tb_port *port, struct seq_file *s) +{ + int cap; + + cap = tb_port_next_cap(port, 0); + while (cap > 0) { + port_cap_show(port, s, cap); + cap = tb_port_next_cap(port, cap); + } +} + +static int port_basic_regs_show(struct tb_port *port, struct seq_file *s) +{ + u32 data[PORT_CAP_BASIC_LEN]; + int ret, i; + + ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data)); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(data); i++) + seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, 
i, data[i]); + + return 0; +} + +static int port_regs_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + int ret; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n"); + + ret = port_basic_regs_show(port, s); + if (ret) + goto out_unlock; + + port_caps_show(port, s); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(port_regs); + +static void switch_cap_show(struct tb_switch *sw, struct seq_file *s, + unsigned int cap) +{ + struct tb_cap_any header; + int ret, length; + u8 vsec_id = 0; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", cap); + return; + } + + if (header.basic.cap == TB_SWITCH_CAP_VSE) { + if (!header.extended_short.length) { + ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH, + cap + 1, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", + cap + 1); + return; + } + length = header.extended_long.length; + } else { + length = header.extended_short.length; + } + vsec_id = header.extended_short.vsec_id; + } else { + if (header.basic.cap == TB_SWITCH_CAP_TMU) { + length = SWITCH_CAP_TMU_LEN; + } else { + seq_printf(s, "0x%04x <unknown capability 0x%02x>\n", + cap, header.basic.cap); + return; + } + } + + cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length); +} + +static void switch_caps_show(struct tb_switch *sw, struct seq_file *s) +{ + int cap; + + cap = tb_switch_next_cap(sw, 0); + while (cap > 0) { + switch_cap_show(sw, s, cap); + cap = tb_switch_next_cap(sw, cap); + } +} + +static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s) +{ + u32 data[SWITCH_CAP_BASIC_LEN]; + size_t dwords; + int ret, i; + + /* Only USB4 has the additional registers */ + if (tb_switch_is_usb4(sw)) + dwords = ARRAY_SIZE(data); + else + dwords = 7; + + ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords); + if (ret) + return ret; + + for (i = 0; i < dwords; i++) + seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]); + + return 0; +} + +static int switch_regs_show(struct seq_file *s, void *not_used) +{ + struct tb_switch *sw = s->private; + struct tb *tb = sw->tb; + int ret; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n"); + + ret = switch_basic_regs_show(sw, s); + if (ret) + goto out_unlock; + + switch_caps_show(sw, s); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(switch_regs); + +static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid) +{ + u32 data[PATH_LEN]; + int ret, i; + + ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN, + ARRAY_SIZE(data)); + if (ret) { + seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(data); i++) { + seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n", + hopid * PATH_LEN + i, i, hopid, data[i]); + } + + return 0; +} + +static int path_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch 
*sw = port->sw; + struct tb *tb = sw->tb; + int start, i, ret = 0; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + seq_puts(s, "# offset relative_offset in_hop_id value\n"); + + /* NHI and lane adapters have entry for path 0 */ + if (tb_port_is_null(port) || tb_port_is_nhi(port)) { + ret = path_show_one(port, s, 0); + if (ret) + goto out_unlock; + } + + start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID; + + for (i = start; i <= port->config.max_in_hop_id; i++) { + ret = path_show_one(port, s, i); + if (ret) + break; + } + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RO(path); + +static int counter_set_regs_show(struct tb_port *port, struct seq_file *s, + int counter) +{ + u32 data[COUNTER_SET_LEN]; + int ret, i; + + ret = tb_port_read(port, data, TB_CFG_COUNTERS, + counter * COUNTER_SET_LEN, ARRAY_SIZE(data)); + if (ret) { + seq_printf(s, "0x%04x <not accessible>\n", + counter * COUNTER_SET_LEN); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(data); i++) { + seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n", + counter * COUNTER_SET_LEN + i, i, counter, data[i]); + } + + return 0; +} + +static int counters_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + int i, ret = 0; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + + seq_puts(s, "# offset relative_offset counter_id value\n"); + + for (i = 0; i < port->config.max_counters; i++) { + ret = counter_set_regs_show(port, s, i); + if (ret) + break; + } + + mutex_unlock(&tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(counters); + +/** + * tb_switch_debugfs_init() - Add debugfs entries for router + * @sw: Pointer to the router + * + * Adds debugfs directories and files for given router. + */ +void tb_switch_debugfs_init(struct tb_switch *sw) +{ + struct dentry *debugfs_dir; + struct tb_port *port; + + debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root); + sw->debugfs_dir = debugfs_dir; + debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw, + &switch_regs_fops); + + tb_switch_for_each_port(sw, port) { + struct dentry *debugfs_dir; + char dir_name[10]; + + if (port->disabled) + continue; + if (port->config.type == TB_TYPE_INACTIVE) + continue; + + snprintf(dir_name, sizeof(dir_name), "port%d", port->port); + debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir); + debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, + port, &port_regs_fops); + debugfs_create_file("path", 0400, debugfs_dir, port, + &path_fops); + if (port->config.counters_support) + debugfs_create_file("counters", 0600, debugfs_dir, port, + &counters_fops); + } + + margining_switch_init(sw); +} + +/** + * tb_switch_debugfs_remove() - Remove all router debugfs entries + * @sw: Pointer to the router + * + * Removes all previously added debugfs entries under this router. 
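Each regs file printed by port_regs_show() and switch_regs_show() starts with the # header above followed by one register per line. A small illustrative parser for a single row; the field layout is taken directly from the seq_printf() format strings above ("0x%04x %4d 0x%02x 0x%02x 0x%08x"), and nothing beyond those formats is assumed.

    #include <stdio.h>

    struct reg_row {
        unsigned int offset;     /* absolute config space offset */
        int relative_offset;     /* offset within the capability */
        unsigned int cap_id;
        unsigned int vsec_id;
        unsigned int value;
    };

    /* Returns 1 when a data row was parsed, 0 for the '#' header line */
    static int parse_reg_row(const char *line, struct reg_row *row)
    {
        if (line[0] == '#')
            return 0;
        return sscanf(line, "0x%x %d 0x%x 0x%x 0x%x",
                      &row->offset, &row->relative_offset,
                      &row->cap_id, &row->vsec_id, &row->value) == 5;
    }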
+ */ +void tb_switch_debugfs_remove(struct tb_switch *sw) +{ + margining_switch_remove(sw); + debugfs_remove_recursive(sw->debugfs_dir); +} + +void tb_xdomain_debugfs_init(struct tb_xdomain *xd) +{ + margining_xdomain_init(xd); +} + +void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) +{ + margining_xdomain_remove(xd); +} + +/** + * tb_service_debugfs_init() - Add debugfs directory for service + * @svc: Thunderbolt service pointer + * + * Adds debugfs directory for service. + */ +void tb_service_debugfs_init(struct tb_service *svc) +{ + svc->debugfs_dir = debugfs_create_dir(dev_name(&svc->dev), + tb_debugfs_root); +} + +/** + * tb_service_debugfs_remove() - Remove service debugfs directory + * @svc: Thunderbolt service pointer + * + * Removes the previously created debugfs directory for @svc. + */ +void tb_service_debugfs_remove(struct tb_service *svc) +{ + debugfs_remove_recursive(svc->debugfs_dir); + svc->debugfs_dir = NULL; +} + +void tb_debugfs_init(void) +{ + tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL); +} + +void tb_debugfs_exit(void) +{ + debugfs_remove_recursive(tb_debugfs_root); +} diff --git a/drivers/thunderbolt/dma_port.c b/drivers/thunderbolt/dma_port.c index 847dd07a7b17..9f20c7bbf0ce 100644 --- a/drivers/thunderbolt/dma_port.c +++ b/drivers/thunderbolt/dma_port.c @@ -299,15 +299,13 @@ static int dma_port_request(struct tb_dma_port *dma, u32 in, return status_to_errno(out); } -static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address, - void *buf, u32 size) +static int dma_port_flash_read_block(void *data, unsigned int dwaddress, + void *buf, size_t dwords) { + struct tb_dma_port *dma = data; struct tb_switch *sw = dma->sw; - u32 in, dwaddress, dwords; int ret; - - dwaddress = address / 4; - dwords = size / 4; + u32 in; in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT; if (dwords < MAIL_DATA_DWORDS) @@ -323,28 +321,25 @@ static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address, dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT); } -static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address, - const void *buf, u32 size) +static int dma_port_flash_write_block(void *data, unsigned int dwaddress, + const void *buf, size_t dwords) { + struct tb_dma_port *dma = data; struct tb_switch *sw = dma->sw; - u32 in, dwaddress, dwords; int ret; - - dwords = size / 4; + u32 in; /* Write the block to MAIL_DATA registers */ ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port, dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT); + if (ret) + return ret; in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT; /* CSS header write is always done to the same magic address */ - if (address >= DMA_PORT_CSS_ADDRESS) { - dwaddress = DMA_PORT_CSS_ADDRESS; + if (dwaddress >= DMA_PORT_CSS_ADDRESS) in |= MAIL_IN_CSS; - } else { - dwaddress = address / 4; - } in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK; in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK; @@ -363,35 +358,8 @@ static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address, int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address, void *buf, size_t size) { - unsigned int retries = DMA_PORT_RETRIES; - unsigned int offset; - - offset = address & 3; - address = address & ~3; - - do { - u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4); - int ret; - - ret = dma_port_flash_read_block(dma, address, dma->buf, - ALIGN(nbytes, 4)); - if (ret) { - if (ret == -ETIMEDOUT) { - if (retries--) - continue; - ret = -EIO; - } - 
return ret; - } - - memcpy(buf, dma->buf + offset, nbytes); - - size -= nbytes; - address += nbytes; - buf += nbytes; - } while (size > 0); - - return 0; + return tb_nvm_read_data(address, buf, size, DMA_PORT_RETRIES, + dma_port_flash_read_block, dma); } /** @@ -408,40 +376,11 @@ int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address, int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address, const void *buf, size_t size) { - unsigned int retries = DMA_PORT_RETRIES; - unsigned int offset; - - if (address >= DMA_PORT_CSS_ADDRESS) { - offset = 0; - if (size > DMA_PORT_CSS_MAX_SIZE) - return -E2BIG; - } else { - offset = address & 3; - address = address & ~3; - } - - do { - u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4); - int ret; - - memcpy(dma->buf + offset, buf, nbytes); + if (address >= DMA_PORT_CSS_ADDRESS && size > DMA_PORT_CSS_MAX_SIZE) + return -E2BIG; - ret = dma_port_flash_write_block(dma, address, buf, nbytes); - if (ret) { - if (ret == -ETIMEDOUT) { - if (retries--) - continue; - ret = -EIO; - } - return ret; - } - - size -= nbytes; - address += nbytes; - buf += nbytes; - } while (size > 0); - - return 0; + return tb_nvm_write_data(address, buf, size, DMA_PORT_RETRIES, + dma_port_flash_write_block, dma); } /** diff --git a/drivers/thunderbolt/dma_test.c b/drivers/thunderbolt/dma_test.c new file mode 100644 index 000000000000..3bedecb236e0 --- /dev/null +++ b/drivers/thunderbolt/dma_test.c @@ -0,0 +1,760 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DMA traffic test driver + * + * Copyright (C) 2020, Intel Corporation + * Authors: Isaac Hazan <isaac.hazan@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/completion.h> +#include <linux/debugfs.h> +#include <linux/module.h> +#include <linux/sizes.h> +#include <linux/thunderbolt.h> + +#define DMA_TEST_TX_RING_SIZE 64 +#define DMA_TEST_RX_RING_SIZE 256 +#define DMA_TEST_FRAME_SIZE SZ_4K +#define DMA_TEST_DATA_PATTERN 0x0123456789abcdefLL +#define DMA_TEST_MAX_PACKETS 1000 + +enum dma_test_frame_pdf { + DMA_TEST_PDF_FRAME_START = 1, + DMA_TEST_PDF_FRAME_END, +}; + +struct dma_test_frame { + struct dma_test *dma_test; + void *data; + struct ring_frame frame; +}; + +enum dma_test_test_error { + DMA_TEST_NO_ERROR, + DMA_TEST_INTERRUPTED, + DMA_TEST_BUFFER_ERROR, + DMA_TEST_DMA_ERROR, + DMA_TEST_CONFIG_ERROR, + DMA_TEST_SPEED_ERROR, + DMA_TEST_WIDTH_ERROR, + DMA_TEST_BONDING_ERROR, + DMA_TEST_PACKET_ERROR, +}; + +static const char * const dma_test_error_names[] = { + [DMA_TEST_NO_ERROR] = "no errors", + [DMA_TEST_INTERRUPTED] = "interrupted by signal", + [DMA_TEST_BUFFER_ERROR] = "no memory for packet buffers", + [DMA_TEST_DMA_ERROR] = "DMA ring setup failed", + [DMA_TEST_CONFIG_ERROR] = "configuration is not valid", + [DMA_TEST_SPEED_ERROR] = "unexpected link speed", + [DMA_TEST_WIDTH_ERROR] = "unexpected link width", + [DMA_TEST_BONDING_ERROR] = "lane bonding configuration error", + [DMA_TEST_PACKET_ERROR] = "packet check failed", +}; + +enum dma_test_result { + DMA_TEST_NOT_RUN, + DMA_TEST_SUCCESS, + DMA_TEST_FAIL, +}; + +static const char * const dma_test_result_names[] = { + [DMA_TEST_NOT_RUN] = "not run", + [DMA_TEST_SUCCESS] = "success", + [DMA_TEST_FAIL] = "failed", +}; + +/** + * struct dma_test - DMA test device driver private data + * @svc: XDomain service the driver is bound to + * @xd: XDomain the service belongs to + * @rx_ring: Software ring holding RX frames + * @rx_hopid: HopID used for receiving frames + * @tx_ring: Software ring holding TX frames + 
* @tx_hopid: HopID used for sending frames
+ * @packets_to_send: Number of packets to send
+ * @packets_to_receive: Number of packets to receive
+ * @packets_sent: Actual number of packets sent
+ * @packets_received: Actual number of packets received
+ * @link_speed: Expected link speed (Gb/s), %0 to use whatever is negotiated
+ * @link_width: Expected link width (lanes), %0 to use whatever is negotiated
+ * @crc_errors: Number of CRC errors during the test run
+ * @buffer_overflow_errors: Number of buffer overflow errors during the test
+ *			    run
+ * @result: Result of the last run
+ * @error_code: Error code of the last run
+ * @complete: Used to wait for the Rx to complete
+ * @lock: Lock serializing access to this structure
+ * @debugfs_dir: dentry of this dma_test
+ */
+struct dma_test {
+	const struct tb_service *svc;
+	struct tb_xdomain *xd;
+	struct tb_ring *rx_ring;
+	int rx_hopid;
+	struct tb_ring *tx_ring;
+	int tx_hopid;
+	unsigned int packets_to_send;
+	unsigned int packets_to_receive;
+	unsigned int packets_sent;
+	unsigned int packets_received;
+	unsigned int link_speed;
+	unsigned int link_width;
+	unsigned int crc_errors;
+	unsigned int buffer_overflow_errors;
+	enum dma_test_result result;
+	enum dma_test_test_error error_code;
+	struct completion complete;
+	struct mutex lock;
+	struct dentry *debugfs_dir;
+};
+
+/* DMA test property directory UUID: 3188cd10-6523-4a5a-a682-fdca07a248d8 */
+static const uuid_t dma_test_dir_uuid =
+	UUID_INIT(0x3188cd10, 0x6523, 0x4a5a,
+		  0xa6, 0x82, 0xfd, 0xca, 0x07, 0xa2, 0x48, 0xd8);
+
+static struct tb_property_dir *dma_test_dir;
+static void *dma_test_pattern;
+
+static void dma_test_free_rings(struct dma_test *dt)
+{
+	if (dt->rx_ring) {
+		tb_xdomain_release_in_hopid(dt->xd, dt->rx_hopid);
+		tb_ring_free(dt->rx_ring);
+		dt->rx_ring = NULL;
+	}
+	if (dt->tx_ring) {
+		tb_xdomain_release_out_hopid(dt->xd, dt->tx_hopid);
+		tb_ring_free(dt->tx_ring);
+		dt->tx_ring = NULL;
+	}
+}
+
+static int dma_test_start_rings(struct dma_test *dt)
+{
+	unsigned int flags = RING_FLAG_FRAME;
+	struct tb_xdomain *xd = dt->xd;
+	int ret, e2e_tx_hop = 0;
+	struct tb_ring *ring;
+
+	/*
+	 * If we are both sender and receiver (traffic goes over a
+	 * special loopback dongle), enable E2E flow control. This avoids
+	 * losing packets.
+	 */
+	if (dt->packets_to_send && dt->packets_to_receive)
+		flags |= RING_FLAG_E2E;
+
+	if (dt->packets_to_send) {
+		ring = tb_ring_alloc_tx(xd->tb->nhi, -1, DMA_TEST_TX_RING_SIZE,
+					flags);
+		if (!ring)
+			return -ENOMEM;
+
+		dt->tx_ring = ring;
+		e2e_tx_hop = ring->hop;
+
+		ret = tb_xdomain_alloc_out_hopid(xd, -1);
+		if (ret < 0) {
+			dma_test_free_rings(dt);
+			return ret;
+		}
+
+		dt->tx_hopid = ret;
+	}
+
+	if (dt->packets_to_receive) {
+		u16 sof_mask, eof_mask;
+
+		sof_mask = BIT(DMA_TEST_PDF_FRAME_START);
+		eof_mask = BIT(DMA_TEST_PDF_FRAME_END);
+
+		ring = tb_ring_alloc_rx(xd->tb->nhi, -1, DMA_TEST_RX_RING_SIZE,
+					flags, e2e_tx_hop, sof_mask, eof_mask,
+					NULL, NULL);
+		if (!ring) {
+			dma_test_free_rings(dt);
+			return -ENOMEM;
+		}
+
+		dt->rx_ring = ring;
+
+		ret = tb_xdomain_alloc_in_hopid(xd, -1);
+		if (ret < 0) {
+			dma_test_free_rings(dt);
+			return ret;
+		}
+
+		dt->rx_hopid = ret;
+	}
+
+	ret = tb_xdomain_enable_paths(dt->xd, dt->tx_hopid,
+				      dt->tx_ring ? dt->tx_ring->hop : 0,
+				      dt->rx_hopid,
+				      dt->rx_ring ?
dt->rx_ring->hop : 0); + if (ret) { + dma_test_free_rings(dt); + return ret; + } + + if (dt->tx_ring) + tb_ring_start(dt->tx_ring); + if (dt->rx_ring) + tb_ring_start(dt->rx_ring); + + return 0; +} + +static void dma_test_stop_rings(struct dma_test *dt) +{ + int ret; + + if (dt->rx_ring) + tb_ring_stop(dt->rx_ring); + if (dt->tx_ring) + tb_ring_stop(dt->tx_ring); + + ret = tb_xdomain_disable_paths(dt->xd, dt->tx_hopid, + dt->tx_ring ? dt->tx_ring->hop : 0, + dt->rx_hopid, + dt->rx_ring ? dt->rx_ring->hop : 0); + if (ret) + dev_warn(&dt->svc->dev, "failed to disable DMA paths\n"); + + dma_test_free_rings(dt); +} + +static void dma_test_rx_callback(struct tb_ring *ring, struct ring_frame *frame, + bool canceled) +{ + struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame); + struct dma_test *dt = tf->dma_test; + struct device *dma_dev = tb_ring_dma_device(dt->rx_ring); + + dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE, + DMA_FROM_DEVICE); + kfree(tf->data); + + if (canceled) { + kfree(tf); + return; + } + + dt->packets_received++; + dev_dbg(&dt->svc->dev, "packet %u/%u received\n", dt->packets_received, + dt->packets_to_receive); + + if (tf->frame.flags & RING_DESC_CRC_ERROR) + dt->crc_errors++; + if (tf->frame.flags & RING_DESC_BUFFER_OVERRUN) + dt->buffer_overflow_errors++; + + kfree(tf); + + if (dt->packets_received == dt->packets_to_receive) + complete(&dt->complete); +} + +static int dma_test_submit_rx(struct dma_test *dt, size_t npackets) +{ + struct device *dma_dev = tb_ring_dma_device(dt->rx_ring); + int i; + + for (i = 0; i < npackets; i++) { + struct dma_test_frame *tf; + dma_addr_t dma_addr; + + tf = kzalloc(sizeof(*tf), GFP_KERNEL); + if (!tf) + return -ENOMEM; + + tf->data = kzalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL); + if (!tf->data) { + kfree(tf); + return -ENOMEM; + } + + dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE, + DMA_FROM_DEVICE); + if (dma_mapping_error(dma_dev, dma_addr)) { + kfree(tf->data); + kfree(tf); + return -ENOMEM; + } + + tf->frame.buffer_phy = dma_addr; + tf->frame.callback = dma_test_rx_callback; + tf->dma_test = dt; + INIT_LIST_HEAD(&tf->frame.list); + + tb_ring_rx(dt->rx_ring, &tf->frame); + } + + return 0; +} + +static void dma_test_tx_callback(struct tb_ring *ring, struct ring_frame *frame, + bool canceled) +{ + struct dma_test_frame *tf = container_of(frame, typeof(*tf), frame); + struct dma_test *dt = tf->dma_test; + struct device *dma_dev = tb_ring_dma_device(dt->tx_ring); + + dma_unmap_single(dma_dev, tf->frame.buffer_phy, DMA_TEST_FRAME_SIZE, + DMA_TO_DEVICE); + kfree(tf->data); + kfree(tf); +} + +static int dma_test_submit_tx(struct dma_test *dt, size_t npackets) +{ + struct device *dma_dev = tb_ring_dma_device(dt->tx_ring); + int i; + + for (i = 0; i < npackets; i++) { + struct dma_test_frame *tf; + dma_addr_t dma_addr; + + tf = kzalloc(sizeof(*tf), GFP_KERNEL); + if (!tf) + return -ENOMEM; + + tf->frame.size = 0; /* means 4096 */ + tf->dma_test = dt; + + tf->data = kmemdup(dma_test_pattern, DMA_TEST_FRAME_SIZE, GFP_KERNEL); + if (!tf->data) { + kfree(tf); + return -ENOMEM; + } + + dma_addr = dma_map_single(dma_dev, tf->data, DMA_TEST_FRAME_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dma_dev, dma_addr)) { + kfree(tf->data); + kfree(tf); + return -ENOMEM; + } + + tf->frame.buffer_phy = dma_addr; + tf->frame.callback = dma_test_tx_callback; + tf->frame.sof = DMA_TEST_PDF_FRAME_START; + tf->frame.eof = DMA_TEST_PDF_FRAME_END; + INIT_LIST_HEAD(&tf->frame.list); + + dt->packets_sent++; + 
dev_dbg(&dt->svc->dev, "packet %u/%u sent\n", dt->packets_sent, + dt->packets_to_send); + + tb_ring_tx(dt->tx_ring, &tf->frame); + } + + return 0; +} + +#define DMA_TEST_DEBUGFS_ATTR(__fops, __get, __validate, __set) \ +static int __fops ## _show(void *data, u64 *val) \ +{ \ + struct tb_service *svc = data; \ + struct dma_test *dt = tb_service_get_drvdata(svc); \ + int ret; \ + \ + ret = mutex_lock_interruptible(&dt->lock); \ + if (ret) \ + return ret; \ + __get(dt, val); \ + mutex_unlock(&dt->lock); \ + return 0; \ +} \ +static int __fops ## _store(void *data, u64 val) \ +{ \ + struct tb_service *svc = data; \ + struct dma_test *dt = tb_service_get_drvdata(svc); \ + int ret; \ + \ + ret = __validate(val); \ + if (ret) \ + return ret; \ + ret = mutex_lock_interruptible(&dt->lock); \ + if (ret) \ + return ret; \ + __set(dt, val); \ + mutex_unlock(&dt->lock); \ + return 0; \ +} \ +DEFINE_DEBUGFS_ATTRIBUTE(__fops ## _fops, __fops ## _show, \ + __fops ## _store, "%llu\n") + +static void lanes_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->link_width; +} + +static int lanes_validate(u64 val) +{ + return val > 2 ? -EINVAL : 0; +} + +static void lanes_set(struct dma_test *dt, u64 val) +{ + dt->link_width = val; +} +DMA_TEST_DEBUGFS_ATTR(lanes, lanes_get, lanes_validate, lanes_set); + +static void speed_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->link_speed; +} + +static int speed_validate(u64 val) +{ + switch (val) { + case 20: + case 10: + case 0: + return 0; + default: + return -EINVAL; + } +} + +static void speed_set(struct dma_test *dt, u64 val) +{ + dt->link_speed = val; +} +DMA_TEST_DEBUGFS_ATTR(speed, speed_get, speed_validate, speed_set); + +static void packets_to_receive_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->packets_to_receive; +} + +static int packets_to_receive_validate(u64 val) +{ + return val > DMA_TEST_MAX_PACKETS ? -EINVAL : 0; +} + +static void packets_to_receive_set(struct dma_test *dt, u64 val) +{ + dt->packets_to_receive = val; +} +DMA_TEST_DEBUGFS_ATTR(packets_to_receive, packets_to_receive_get, + packets_to_receive_validate, packets_to_receive_set); + +static void packets_to_send_get(const struct dma_test *dt, u64 *val) +{ + *val = dt->packets_to_send; +} + +static int packets_to_send_validate(u64 val) +{ + return val > DMA_TEST_MAX_PACKETS ? 
-EINVAL : 0; +} + +static void packets_to_send_set(struct dma_test *dt, u64 val) +{ + dt->packets_to_send = val; +} +DMA_TEST_DEBUGFS_ATTR(packets_to_send, packets_to_send_get, + packets_to_send_validate, packets_to_send_set); + +static int dma_test_set_bonding(struct dma_test *dt) +{ + switch (dt->link_width) { + case 2: + return tb_xdomain_lane_bonding_enable(dt->xd); + case 1: + tb_xdomain_lane_bonding_disable(dt->xd); + fallthrough; + default: + return 0; + } +} + +static bool dma_test_validate_config(struct dma_test *dt) +{ + if (!dt->packets_to_send && !dt->packets_to_receive) + return false; + if (dt->packets_to_send && dt->packets_to_receive && + dt->packets_to_send != dt->packets_to_receive) + return false; + return true; +} + +static void dma_test_check_errors(struct dma_test *dt, int ret) +{ + if (!dt->error_code) { + if (dt->link_speed && dt->xd->link_speed != dt->link_speed) { + dt->error_code = DMA_TEST_SPEED_ERROR; + } else if (dt->link_width && + dt->xd->link_width != dt->link_width) { + dt->error_code = DMA_TEST_WIDTH_ERROR; + } else if (dt->packets_to_send != dt->packets_sent || + dt->packets_to_receive != dt->packets_received || + dt->crc_errors || dt->buffer_overflow_errors) { + dt->error_code = DMA_TEST_PACKET_ERROR; + } else { + return; + } + } + + dt->result = DMA_TEST_FAIL; +} + +static int test_store(void *data, u64 val) +{ + struct tb_service *svc = data; + struct dma_test *dt = tb_service_get_drvdata(svc); + int ret; + + if (val != 1) + return -EINVAL; + + ret = mutex_lock_interruptible(&dt->lock); + if (ret) + return ret; + + dt->packets_sent = 0; + dt->packets_received = 0; + dt->crc_errors = 0; + dt->buffer_overflow_errors = 0; + dt->result = DMA_TEST_SUCCESS; + dt->error_code = DMA_TEST_NO_ERROR; + + dev_dbg(&svc->dev, "DMA test starting\n"); + if (dt->link_speed) + dev_dbg(&svc->dev, "link_speed: %u Gb/s\n", dt->link_speed); + if (dt->link_width) + dev_dbg(&svc->dev, "link_width: %u\n", dt->link_width); + dev_dbg(&svc->dev, "packets_to_send: %u\n", dt->packets_to_send); + dev_dbg(&svc->dev, "packets_to_receive: %u\n", dt->packets_to_receive); + + if (!dma_test_validate_config(dt)) { + dev_err(&svc->dev, "invalid test configuration\n"); + dt->error_code = DMA_TEST_CONFIG_ERROR; + goto out_unlock; + } + + ret = dma_test_set_bonding(dt); + if (ret) { + dev_err(&svc->dev, "failed to set lanes\n"); + dt->error_code = DMA_TEST_BONDING_ERROR; + goto out_unlock; + } + + ret = dma_test_start_rings(dt); + if (ret) { + dev_err(&svc->dev, "failed to enable DMA rings\n"); + dt->error_code = DMA_TEST_DMA_ERROR; + goto out_unlock; + } + + if (dt->packets_to_receive) { + reinit_completion(&dt->complete); + ret = dma_test_submit_rx(dt, dt->packets_to_receive); + if (ret) { + dev_err(&svc->dev, "failed to submit receive buffers\n"); + dt->error_code = DMA_TEST_BUFFER_ERROR; + goto out_stop; + } + } + + if (dt->packets_to_send) { + ret = dma_test_submit_tx(dt, dt->packets_to_send); + if (ret) { + dev_err(&svc->dev, "failed to submit transmit buffers\n"); + dt->error_code = DMA_TEST_BUFFER_ERROR; + goto out_stop; + } + } + + if (dt->packets_to_receive) { + ret = wait_for_completion_interruptible(&dt->complete); + if (ret) { + dt->error_code = DMA_TEST_INTERRUPTED; + goto out_stop; + } + } + +out_stop: + dma_test_stop_rings(dt); +out_unlock: + dma_test_check_errors(dt, ret); + mutex_unlock(&dt->lock); + + dev_dbg(&svc->dev, "DMA test %s\n", dma_test_result_names[dt->result]); + return ret; +} +DEFINE_DEBUGFS_ATTRIBUTE(test_fops, NULL, test_store, "%llu\n"); + +static int 
status_show(struct seq_file *s, void *not_used) +{ + struct tb_service *svc = s->private; + struct dma_test *dt = tb_service_get_drvdata(svc); + int ret; + + ret = mutex_lock_interruptible(&dt->lock); + if (ret) + return ret; + + seq_printf(s, "result: %s\n", dma_test_result_names[dt->result]); + if (dt->result == DMA_TEST_NOT_RUN) + goto out_unlock; + + seq_printf(s, "packets received: %u\n", dt->packets_received); + seq_printf(s, "packets sent: %u\n", dt->packets_sent); + seq_printf(s, "CRC errors: %u\n", dt->crc_errors); + seq_printf(s, "buffer overflow errors: %u\n", + dt->buffer_overflow_errors); + seq_printf(s, "error: %s\n", dma_test_error_names[dt->error_code]); + +out_unlock: + mutex_unlock(&dt->lock); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(status); + +static void dma_test_debugfs_init(struct tb_service *svc) +{ + struct dma_test *dt = tb_service_get_drvdata(svc); + + dt->debugfs_dir = debugfs_create_dir("dma_test", svc->debugfs_dir); + + debugfs_create_file("lanes", 0600, dt->debugfs_dir, svc, &lanes_fops); + debugfs_create_file("speed", 0600, dt->debugfs_dir, svc, &speed_fops); + debugfs_create_file("packets_to_receive", 0600, dt->debugfs_dir, svc, + &packets_to_receive_fops); + debugfs_create_file("packets_to_send", 0600, dt->debugfs_dir, svc, + &packets_to_send_fops); + debugfs_create_file("status", 0400, dt->debugfs_dir, svc, &status_fops); + debugfs_create_file("test", 0200, dt->debugfs_dir, svc, &test_fops); +} + +static int dma_test_probe(struct tb_service *svc, const struct tb_service_id *id) +{ + struct tb_xdomain *xd = tb_service_parent(svc); + struct dma_test *dt; + + dt = devm_kzalloc(&svc->dev, sizeof(*dt), GFP_KERNEL); + if (!dt) + return -ENOMEM; + + dt->svc = svc; + dt->xd = xd; + mutex_init(&dt->lock); + init_completion(&dt->complete); + + tb_service_set_drvdata(svc, dt); + dma_test_debugfs_init(svc); + + return 0; +} + +static void dma_test_remove(struct tb_service *svc) +{ + struct dma_test *dt = tb_service_get_drvdata(svc); + + mutex_lock(&dt->lock); + debugfs_remove_recursive(dt->debugfs_dir); + mutex_unlock(&dt->lock); +} + +static int __maybe_unused dma_test_suspend(struct device *dev) +{ + /* + * No need to do anything special here. If userspace is writing + * to the test attribute when suspend started, it comes out from + * wait_for_completion_interruptible() with -ERESTARTSYS and the + * DMA test fails tearing down the rings. Once userspace is + * thawed the kernel restarts the write syscall effectively + * re-running the test. 
+ */ + return 0; +} + +static int __maybe_unused dma_test_resume(struct device *dev) +{ + return 0; +} + +static const struct dev_pm_ops dma_test_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(dma_test_suspend, dma_test_resume) +}; + +static const struct tb_service_id dma_test_ids[] = { + { TB_SERVICE("dma_test", 1) }, + { }, +}; +MODULE_DEVICE_TABLE(tbsvc, dma_test_ids); + +static struct tb_service_driver dma_test_driver = { + .driver = { + .owner = THIS_MODULE, + .name = "thunderbolt_dma_test", + .pm = &dma_test_pm_ops, + }, + .probe = dma_test_probe, + .remove = dma_test_remove, + .id_table = dma_test_ids, +}; + +static int __init dma_test_init(void) +{ + u64 data_value = DMA_TEST_DATA_PATTERN; + int i, ret; + + dma_test_pattern = kmalloc(DMA_TEST_FRAME_SIZE, GFP_KERNEL); + if (!dma_test_pattern) + return -ENOMEM; + + for (i = 0; i < DMA_TEST_FRAME_SIZE / sizeof(data_value); i++) + ((u32 *)dma_test_pattern)[i] = data_value++; + + dma_test_dir = tb_property_create_dir(&dma_test_dir_uuid); + if (!dma_test_dir) { + ret = -ENOMEM; + goto err_free_pattern; + } + + tb_property_add_immediate(dma_test_dir, "prtcid", 1); + tb_property_add_immediate(dma_test_dir, "prtcvers", 1); + tb_property_add_immediate(dma_test_dir, "prtcrevs", 0); + tb_property_add_immediate(dma_test_dir, "prtcstns", 0); + + ret = tb_register_property_dir("dma_test", dma_test_dir); + if (ret) + goto err_free_dir; + + ret = tb_register_service_driver(&dma_test_driver); + if (ret) + goto err_unregister_dir; + + return 0; + +err_unregister_dir: + tb_unregister_property_dir("dma_test", dma_test_dir); +err_free_dir: + tb_property_free_dir(dma_test_dir); +err_free_pattern: + kfree(dma_test_pattern); + + return ret; +} +module_init(dma_test_init); + +static void __exit dma_test_exit(void) +{ + tb_unregister_service_driver(&dma_test_driver); + tb_unregister_property_dir("dma_test", dma_test_dir); + tb_property_free_dir(dma_test_dir); + kfree(dma_test_pattern); +} +module_exit(dma_test_exit); + +MODULE_AUTHOR("Isaac Hazan <isaac.hazan@intel.com>"); +MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); +MODULE_DESCRIPTION("DMA traffic test driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index b7980c856898..ec7b5f65804e 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -7,9 +7,7 @@ */ #include <linux/device.h> -#include <linux/dmar.h> #include <linux/idr.h> -#include <linux/iommu.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/slab.h> @@ -86,7 +84,7 @@ static int tb_service_probe(struct device *dev) return driver->probe(svc, id); } -static int tb_service_remove(struct device *dev) +static void tb_service_remove(struct device *dev) { struct tb_service *svc = tb_to_service(dev); struct tb_service_driver *driver; @@ -94,8 +92,6 @@ static int tb_service_remove(struct device *dev) driver = container_of(dev->driver, struct tb_service_driver, driver); if (driver->remove) driver->remove(svc); - - return 0; } static void tb_service_shutdown(struct device *dev) @@ -118,6 +114,7 @@ static const char * const tb_security_names[] = { [TB_SECURITY_SECURE] = "secure", [TB_SECURITY_DPONLY] = "dponly", [TB_SECURITY_USBONLY] = "usbonly", + [TB_SECURITY_NOPCIE] = "nopcie", }; static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr, @@ -147,11 +144,9 @@ static ssize_t boot_acl_show(struct device *dev, struct device_attribute *attr, for (ret = 0, i = 0; i < tb->nboot_acl; i++) { if (!uuid_is_null(&uuids[i])) - 
ret += snprintf(buf + ret, PAGE_SIZE - ret, "%pUb", - &uuids[i]); + ret += sysfs_emit_at(buf, ret, "%pUb", &uuids[i]); - ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s", - i < tb->nboot_acl - 1 ? "," : "\n"); + ret += sysfs_emit_at(buf, ret, "%s", i < tb->nboot_acl - 1 ? "," : "\n"); } out: @@ -238,17 +233,29 @@ err_free_str: } static DEVICE_ATTR_RW(boot_acl); +static ssize_t deauthorization_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + const struct tb *tb = container_of(dev, struct tb, dev); + bool deauthorization = false; + + /* Only meaningful if authorization is supported */ + if (tb->security_level == TB_SECURITY_USER || + tb->security_level == TB_SECURITY_SECURE) + deauthorization = !!tb->cm_ops->disapprove_switch; + + return sysfs_emit(buf, "%d\n", deauthorization); +} +static DEVICE_ATTR_RO(deauthorization); + static ssize_t iommu_dma_protection_show(struct device *dev, struct device_attribute *attr, char *buf) { - /* - * Kernel DMA protection is a feature where Thunderbolt security is - * handled natively using IOMMU. It is enabled when IOMMU is - * enabled and ACPI DMAR table has DMAR_PLATFORM_OPT_IN set. - */ - return sprintf(buf, "%d\n", - iommu_present(&pci_bus_type) && dmar_platform_optin()); + struct tb *tb = container_of(dev, struct tb, dev); + + return sysfs_emit(buf, "%d\n", tb->nhi->iommu_dma_protection); } static DEVICE_ATTR_RO(iommu_dma_protection); @@ -261,12 +268,13 @@ static ssize_t security_show(struct device *dev, struct device_attribute *attr, if (tb->security_level < ARRAY_SIZE(tb_security_names)) name = tb_security_names[tb->security_level]; - return sprintf(buf, "%s\n", name); + return sysfs_emit(buf, "%s\n", name); } static DEVICE_ATTR_RO(security); static struct attribute *domain_attrs[] = { &dev_attr_boot_acl.attr, + &dev_attr_deauthorization.attr, &dev_attr_iommu_dma_protection.attr, &dev_attr_security.attr, NULL, @@ -275,7 +283,7 @@ static struct attribute *domain_attrs[] = { static umode_t domain_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct tb *tb = container_of(dev, struct tb, dev); if (attr == &dev_attr_boot_acl.attr) { @@ -289,7 +297,7 @@ static umode_t domain_attr_is_visible(struct kobject *kobj, return attr->mode; } -static struct attribute_group domain_attr_group = { +static const struct attribute_group domain_attr_group = { .is_visible = domain_attr_is_visible, .attrs = domain_attrs, }; @@ -323,9 +331,34 @@ struct device_type tb_domain_type = { .release = tb_domain_release, }; +static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type, + const void *buf, size_t size) +{ + struct tb *tb = data; + + if (!tb->cm_ops->handle_event) { + tb_warn(tb, "domain does not have event handler\n"); + return true; + } + + switch (type) { + case TB_CFG_PKG_XDOMAIN_REQ: + case TB_CFG_PKG_XDOMAIN_RESP: + if (tb_is_xdomain_enabled()) + return tb_xdomain_handle_request(tb, type, buf, size); + break; + + default: + tb->cm_ops->handle_event(tb, type, buf, size); + } + + return true; +} + /** * tb_domain_alloc() - Allocate a domain * @nhi: Pointer to the host controller + * @timeout_msec: Control channel timeout for non-raw messages * @privsize: Size of the connection manager private data * * Allocates and initializes a new Thunderbolt domain. 
Connection @@ -337,7 +370,7 @@ struct device_type tb_domain_type = { * * Return: allocated domain structure on %NULL in case of error */ -struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize) +struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize) { struct tb *tb; @@ -364,6 +397,10 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize) if (!tb->wq) goto err_remove_ida; + tb->ctl = tb_ctl_alloc(nhi, timeout_msec, tb_domain_event_cb, tb); + if (!tb->ctl) + goto err_destroy_wq; + tb->dev.parent = &nhi->pdev->dev; tb->dev.bus = &tb_bus_type; tb->dev.type = &tb_domain_type; @@ -373,6 +410,8 @@ struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize) return tb; +err_destroy_wq: + destroy_workqueue(tb->wq); err_remove_ida: ida_simple_remove(&tb_domain_ida, tb->index); err_free: @@ -381,28 +420,6 @@ err_free: return NULL; } -static bool tb_domain_event_cb(void *data, enum tb_cfg_pkg_type type, - const void *buf, size_t size) -{ - struct tb *tb = data; - - if (!tb->cm_ops->handle_event) { - tb_warn(tb, "domain does not have event handler\n"); - return true; - } - - switch (type) { - case TB_CFG_PKG_XDOMAIN_REQ: - case TB_CFG_PKG_XDOMAIN_RESP: - return tb_xdomain_handle_request(tb, type, buf, size); - - default: - tb->cm_ops->handle_event(tb, type, buf, size); - } - - return true; -} - /** * tb_domain_add() - Add domain to the system * @tb: Domain to add @@ -422,13 +439,6 @@ int tb_domain_add(struct tb *tb) return -EINVAL; mutex_lock(&tb->lock); - - tb->ctl = tb_ctl_alloc(tb->nhi, tb_domain_event_cb, tb); - if (!tb->ctl) { - ret = -ENOMEM; - goto err_unlock; - } - /* * tb_schedule_hotplug_handler may be called as soon as the config * channel is started. Thats why we have to hold the lock here. @@ -441,6 +451,9 @@ int tb_domain_add(struct tb *tb) goto err_ctl_stop; } + tb_dbg(tb, "security level set to %s\n", + tb_security_names[tb->security_level]); + ret = device_add(&tb->dev); if (ret) goto err_ctl_stop; @@ -455,6 +468,8 @@ int tb_domain_add(struct tb *tb) /* This starts event processing */ mutex_unlock(&tb->lock); + device_init_wakeup(&tb->dev, true); + pm_runtime_no_callbacks(&tb->dev); pm_runtime_set_active(&tb->dev); pm_runtime_enable(&tb->dev); @@ -468,7 +483,6 @@ err_domain_del: device_del(&tb->dev); err_ctl_stop: tb_ctl_stop(tb->ctl); -err_unlock: mutex_unlock(&tb->lock); return ret; @@ -544,6 +558,33 @@ int tb_domain_suspend(struct tb *tb) return tb->cm_ops->suspend ? tb->cm_ops->suspend(tb) : 0; } +int tb_domain_freeze_noirq(struct tb *tb) +{ + int ret = 0; + + mutex_lock(&tb->lock); + if (tb->cm_ops->freeze_noirq) + ret = tb->cm_ops->freeze_noirq(tb); + if (!ret) + tb_ctl_stop(tb->ctl); + mutex_unlock(&tb->lock); + + return ret; +} + +int tb_domain_thaw_noirq(struct tb *tb) +{ + int ret = 0; + + mutex_lock(&tb->lock); + tb_ctl_start(tb->ctl); + if (tb->cm_ops->thaw_noirq) + ret = tb->cm_ops->thaw_noirq(tb); + mutex_unlock(&tb->lock); + + return ret; +} + void tb_domain_complete(struct tb *tb) { if (tb->cm_ops->complete) @@ -573,13 +614,30 @@ int tb_domain_runtime_resume(struct tb *tb) } /** + * tb_domain_disapprove_switch() - Disapprove switch + * @tb: Domain the switch belongs to + * @sw: Switch to disapprove + * + * This will disconnect PCIe tunnel from parent to this @sw. + * + * Return: %0 on success and negative errno in case of failure. 
+ */ +int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw) +{ + if (!tb->cm_ops->disapprove_switch) + return -EPERM; + + return tb->cm_ops->disapprove_switch(tb, sw); +} + +/** * tb_domain_approve_switch() - Approve switch * @tb: Domain the switch belongs to * @sw: Switch to approve * * This will approve switch by connection manager specific means. In - * case of success the connection manager will create tunnels for all - * supported protocols. + * case of success the connection manager will create PCIe tunnel from + * parent to @sw. */ int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw) { @@ -724,6 +782,10 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb) * tb_domain_approve_xdomain_paths() - Enable DMA paths for XDomain * @tb: Domain enabling the DMA paths * @xd: XDomain DMA paths are created to + * @transmit_path: HopID we are using to send out packets + * @transmit_ring: DMA ring used to send out packets + * @receive_path: HopID the other end is using to send packets to us + * @receive_ring: DMA ring used to receive packets from @receive_path * * Calls connection manager specific method to enable DMA paths to the * XDomain in question. @@ -732,18 +794,25 @@ int tb_domain_disconnect_pcie_paths(struct tb *tb) * particular returns %-ENOTSUPP if the connection manager * implementation does not support XDomains. */ -int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) +int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) { if (!tb->cm_ops->approve_xdomain_paths) return -ENOTSUPP; - return tb->cm_ops->approve_xdomain_paths(tb, xd); + return tb->cm_ops->approve_xdomain_paths(tb, xd, transmit_path, + transmit_ring, receive_path, receive_ring); } /** * tb_domain_disconnect_xdomain_paths() - Disable DMA paths for XDomain * @tb: Domain disabling the DMA paths * @xd: XDomain whose DMA paths are disconnected + * @transmit_path: HopID we are using to send out packets + * @transmit_ring: DMA ring used to send out packets + * @receive_path: HopID the other end is using to send packets to us + * @receive_ring: DMA ring used to receive packets from @receive_path * * Calls connection manager specific method to disconnect DMA paths to * the XDomain in question. @@ -752,12 +821,15 @@ int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) * particular returns %-ENOTSUPP if the connection manager * implementation does not support XDomains. 
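The transmit_path/transmit_ring and receive_path/receive_ring pairs always travel together: the HopID names the path endpoint on the XDomain link while the ring number names the NHI ring that backs it. A kernel-style sketch of how a service driver produces these values, modeled on dma_test_start_rings() earlier in this diff; it is illustrative only, and unwinding of the allocated HopID on failure is omitted for brevity.

    #include <linux/thunderbolt.h>

    /* Sketch: allocate a TX ring plus an output HopID, then ask the
     * connection manager to program the paths. A transmit-only setup
     * passes 0 for the unused RX pair, as dma_test does.
     */
    static int example_enable_tx_path(struct tb_xdomain *xd,
                                      struct tb_ring **ring_out,
                                      int *hopid_out)
    {
        struct tb_ring *ring;
        int hopid;

        ring = tb_ring_alloc_tx(xd->tb->nhi, -1, 64, RING_FLAG_FRAME);
        if (!ring)
            return -ENOMEM;

        hopid = tb_xdomain_alloc_out_hopid(xd, -1);
        if (hopid < 0) {
            tb_ring_free(ring);
            return hopid;
        }

        *ring_out = ring;
        *hopid_out = hopid;
        return tb_xdomain_enable_paths(xd, hopid, ring->hop, 0, 0);
    }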
*/
-int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd)
+int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd,
+				       int transmit_path, int transmit_ring,
+				       int receive_path, int receive_ring)
 {
 	if (!tb->cm_ops->disconnect_xdomain_paths)
 		return -ENOTSUPP;
-	return tb->cm_ops->disconnect_xdomain_paths(tb, xd);
+	return tb->cm_ops->disconnect_xdomain_paths(tb, xd, transmit_path,
+			transmit_ring, receive_path, receive_ring);
 }
 static int disconnect_xdomain(struct device *dev, void *data)
@@ -768,7 +840,7 @@ static int disconnect_xdomain(struct device *dev, void *data)
 	xd = tb_to_xdomain(dev);
 	if (xd && xd->tb == tb)
-		ret = tb_xdomain_disable_paths(xd);
+		ret = tb_xdomain_disable_all_paths(xd);
 	return ret;
 }
@@ -798,12 +870,23 @@ int tb_domain_init(void)
 {
 	int ret;
+	tb_debugfs_init();
+	tb_acpi_init();
+
 	ret = tb_xdomain_init();
 	if (ret)
-		return ret;
+		goto err_acpi;
 	ret = bus_register(&tb_bus_type);
 	if (ret)
-		tb_xdomain_exit();
+		goto err_xdomain;
+
+	return 0;
+
+err_xdomain:
+	tb_xdomain_exit();
+err_acpi:
+	tb_acpi_exit();
+	tb_debugfs_exit();
 	return ret;
 }
@@ -812,6 +895,8 @@ void tb_domain_exit(void)
 {
 	bus_unregister(&tb_bus_type);
 	ida_destroy(&tb_domain_ida);
-	tb_switch_exit();
+	tb_nvm_exit();
 	tb_xdomain_exit();
+	tb_acpi_exit();
+	tb_debugfs_exit();
 }
diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index 921d164b3f35..c90d22f56d4e 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -7,24 +7,25 @@
 */
 #include <linux/crc32.h>
+#include <linux/delay.h>
 #include <linux/property.h>
 #include <linux/slab.h>
 #include "tb.h"
-/**
+/*
 * tb_eeprom_ctl_write() - write control word
 */
static int tb_eeprom_ctl_write(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
 {
-	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
+	return tb_sw_write(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1);
 }
-/**
+/*
 * tb_eeprom_ctl_read() - read control word
 */
static int tb_eeprom_ctl_read(struct tb_switch *sw, struct tb_eeprom_ctl *ctl)
 {
-	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + 4, 1);
+	return tb_sw_read(sw, ctl, TB_CFG_SWITCH, sw->cap_plug_events + ROUTER_CS_4, 1);
 }
 enum tb_eeprom_transfer {
@@ -32,7 +33,7 @@ enum tb_eeprom_transfer {
 	TB_EEPROM_OUT,
 };
-/**
+/*
 * tb_eeprom_active - enable rom access
 *
 * WARNING: Always disable access after usage. Otherwise the controller will
@@ -45,27 +46,27 @@ static int tb_eeprom_active(struct tb_switch *sw, bool enable)
 	if (res)
 		return res;
 	if (enable) {
-		ctl.access_high = 1;
+		ctl.bit_banging_enable = 1;
 		res = tb_eeprom_ctl_write(sw, &ctl);
 		if (res)
 			return res;
-		ctl.access_low = 0;
+		ctl.fl_cs = 0;
 		return tb_eeprom_ctl_write(sw, &ctl);
 	} else {
-		ctl.access_low = 1;
+		ctl.fl_cs = 1;
 		res = tb_eeprom_ctl_write(sw, &ctl);
 		if (res)
 			return res;
-		ctl.access_high = 0;
+		ctl.bit_banging_enable = 0;
 		return tb_eeprom_ctl_write(sw, &ctl);
 	}
 }
-/**
+/*
 * tb_eeprom_transfer - transfer one bit
 *
- * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->data_in.
- * If TB_EEPROM_OUT is passed, then ctl->data_out will be written.
+ * If TB_EEPROM_IN is passed, then the bit can be retrieved from ctl->fl_do.
+ * If TB_EEPROM_OUT is passed, then ctl->fl_di will be written.
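The renamed bits follow the bit-banging signal names: fl_cs is the chip select (driven low while the EEPROM is accessed, per tb_eeprom_active() above), fl_sk the clock, fl_di the data line toward the EEPROM and fl_do the data line from it. Purely as an illustration of the MSB-first transfer that tb_eeprom_out() below implements, here is a generic sketch in which set_data() and pulse_clock() are hypothetical callbacks standing in for the fl_di write and the fl_sk high/low sequence done through tb_eeprom_ctl_write():

    #include <linux/types.h>

    /* Illustrative only: bit-bang one byte, MSB first, the same
     * pattern as tb_eeprom_out(). Both callbacks are hypothetical.
     */
    static int bitbang_write_byte(u8 val,
                                  int (*set_data)(bool bit),
                                  int (*pulse_clock)(void))
    {
        int i, ret;

        for (i = 0; i < 8; i++) {
            ret = set_data(val & 0x80);
            if (ret)
                return ret;
            ret = pulse_clock();
            if (ret)
                return ret;
            val <<= 1;
        }
        return 0;
    }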
*/ static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl, enum tb_eeprom_transfer direction) @@ -76,7 +77,7 @@ static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl, if (res) return res; } - ctl->clock = 1; + ctl->fl_sk = 1; res = tb_eeprom_ctl_write(sw, ctl); if (res) return res; @@ -85,11 +86,11 @@ static int tb_eeprom_transfer(struct tb_switch *sw, struct tb_eeprom_ctl *ctl, if (res) return res; } - ctl->clock = 0; + ctl->fl_sk = 0; return tb_eeprom_ctl_write(sw, ctl); } -/** +/* * tb_eeprom_out - write one byte to the bus */ static int tb_eeprom_out(struct tb_switch *sw, u8 val) @@ -100,7 +101,7 @@ static int tb_eeprom_out(struct tb_switch *sw, u8 val) if (res) return res; for (i = 0; i < 8; i++) { - ctl.data_out = val & 0x80; + ctl.fl_di = val & 0x80; res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_OUT); if (res) return res; @@ -109,7 +110,7 @@ static int tb_eeprom_out(struct tb_switch *sw, u8 val) return 0; } -/** +/* * tb_eeprom_in - read one byte from the bus */ static int tb_eeprom_in(struct tb_switch *sw, u8 *val) @@ -125,12 +126,12 @@ static int tb_eeprom_in(struct tb_switch *sw, u8 *val) res = tb_eeprom_transfer(sw, &ctl, TB_EEPROM_IN); if (res) return res; - *val |= ctl.data_in; + *val |= ctl.fl_do; } return 0; } -/** +/* * tb_eeprom_get_drom_offset - get drom offset within eeprom */ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) @@ -161,7 +162,7 @@ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) return 0; } -/** +/* * tb_eeprom_read_n - read count bytes from offset into val */ static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val, @@ -213,7 +214,10 @@ static u32 tb_crc32(void *data, size_t len) return ~__crc32c_le(~0, data, len); } -#define TB_DROM_DATA_START 13 +#define TB_DROM_DATA_START 13 +#define TB_DROM_HEADER_SIZE 22 +#define USB4_DROM_HEADER_SIZE 16 + struct tb_drom_header { /* BYTE 0 */ u8 uid_crc8; /* checksum for uid */ @@ -223,9 +227,9 @@ struct tb_drom_header { u32 data_crc32; /* checksum for data_len bytes starting at byte 13 */ /* BYTE 13 */ u8 device_rom_revision; /* should be <= 1 */ - u16 data_len:10; - u8 __unknown1:6; - /* BYTES 16-21 */ + u16 data_len:12; + u8 reserved:4; + /* BYTES 16-21 - Only for TBT DROM, nonexistent in USB4 DROM */ u16 vendor_id; u16 model_id; u8 model_rev; @@ -247,7 +251,7 @@ struct tb_drom_entry_header { struct tb_drom_entry_generic { struct tb_drom_entry_header header; - u8 data[0]; + u8 data[]; } __packed; struct tb_drom_entry_port { @@ -276,9 +280,21 @@ struct tb_drom_entry_port { u8 unknown4:2; } __packed; +/* USB4 product descriptor */ +struct tb_drom_entry_desc { + struct tb_drom_entry_header header; + u16 bcdUSBSpec; + u16 idVendor; + u16 idProduct; + u16 bcdProductFWRevision; + u32 TID; + u8 productHWRevision; +}; /** - * tb_drom_read_uid_only - read uid directly from drom + * tb_drom_read_uid_only() - Read UID directly from DROM + * @sw: Router whose UID to read + * @uid: UID is placed here * * Does not use the cached copy in sw->drom. Used during resume to check switch * identity. 
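The DROM body after the header is a list of length-prefixed entries, and both generations are walked the same way; only the starting offset differs (TB_DROM_HEADER_SIZE, 22 bytes, for Thunderbolt versus USB4_DROM_HEADER_SIZE, 16 bytes, since the USB4 DROM lacks bytes 16-21). A standalone sketch of that walk using a simplified two-byte entry header; the overrun condition is the one tb_drom_parse_entries() below rejects with -EILSEQ.

    #include <stddef.h>
    #include <stdint.h>

    /* Simplified entry header: a length byte followed by a type byte */
    struct drom_entry {
        uint8_t len;
        uint8_t type;
    };

    static int walk_drom_entries(const uint8_t *drom, size_t drom_size,
                                 size_t header_size)
    {
        size_t pos = header_size;   /* 22 for TBT, 16 for USB4 */

        while (pos < drom_size) {
            const struct drom_entry *entry =
                (const struct drom_entry *)(drom + pos);

            /* Same overrun checks as tb_drom_parse_entries() */
            if (pos + 1 == drom_size || pos + entry->len > drom_size ||
                !entry->len)
                return -1;

            /* dispatch on entry->type here (port vs. generic entry) */
            pos += entry->len;
        }
        return 0;
    }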
@@ -326,6 +342,16 @@ static int tb_drom_parse_entry_generic(struct tb_switch *sw, if (!sw->device_name) return -ENOMEM; break; + case 9: { + const struct tb_drom_entry_desc *desc = + (const struct tb_drom_entry_desc *)entry; + + if (!sw->vendor && !sw->device) { + sw->vendor = desc->idVendor; + sw->device = desc->idProduct; + } + break; + } } return 0; @@ -373,15 +399,15 @@ static int tb_drom_parse_entry_port(struct tb_switch *sw, return 0; } -/** +/* * tb_drom_parse_entries - parse the linked list of drom entries * * Drom must have been copied to sw->drom. */ -static int tb_drom_parse_entries(struct tb_switch *sw) +static int tb_drom_parse_entries(struct tb_switch *sw, size_t header_size) { struct tb_drom_header *header = (void *) sw->drom; - u16 pos = sizeof(*header); + u16 pos = header_size; u16 drom_size = header->data_len + TB_DROM_DATA_START; int res; @@ -389,8 +415,8 @@ static int tb_drom_parse_entries(struct tb_switch *sw) struct tb_drom_entry_header *entry = (void *) (sw->drom + pos); if (pos + 1 == drom_size || pos + entry->len > drom_size || !entry->len) { - tb_sw_warn(sw, "drom buffer overrun, aborting\n"); - return -EIO; + tb_sw_warn(sw, "DROM buffer overrun\n"); + return -EILSEQ; } switch (entry->type) { @@ -409,7 +435,7 @@ static int tb_drom_parse_entries(struct tb_switch *sw) return 0; } -/** +/* * tb_drom_copy_efi - copy drom supplied by EFI to sw->drom if present */ static int tb_drom_copy_efi(struct tb_switch *sw, u16 *size) @@ -518,15 +544,67 @@ static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val, return tb_eeprom_read_n(sw, offset, val, count); } +static int tb_drom_parse(struct tb_switch *sw) +{ + const struct tb_drom_header *header = + (const struct tb_drom_header *)sw->drom; + u32 crc; + + crc = tb_crc8((u8 *) &header->uid, 8); + if (crc != header->uid_crc8) { + tb_sw_warn(sw, + "DROM UID CRC8 mismatch (expected: %#x, got: %#x)\n", + header->uid_crc8, crc); + return -EILSEQ; + } + if (!sw->uid) + sw->uid = header->uid; + sw->vendor = header->vendor_id; + sw->device = header->model_id; + + crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len); + if (crc != header->data_crc32) { + tb_sw_warn(sw, + "DROM data CRC32 mismatch (expected: %#x, got: %#x), continuing\n", + header->data_crc32, crc); + } + + return tb_drom_parse_entries(sw, TB_DROM_HEADER_SIZE); +} + +static int usb4_drom_parse(struct tb_switch *sw) +{ + const struct tb_drom_header *header = + (const struct tb_drom_header *)sw->drom; + u32 crc; + + crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len); + if (crc != header->data_crc32) { + tb_sw_warn(sw, + "DROM data CRC32 mismatch (expected: %#x, got: %#x), aborting\n", + header->data_crc32, crc); + return -EINVAL; + } + + return tb_drom_parse_entries(sw, USB4_DROM_HEADER_SIZE); +} + /** - * tb_drom_read - copy drom to sw->drom and parse it + * tb_drom_read() - Copy DROM to sw->drom and parse it + * @sw: Router whose DROM to read and parse + * + * This function reads router DROM and if successful parses the entries and + * populates the fields in @sw accordingly. Can be called for any router + * generation. + * + * Returns %0 in case of success and negative errno otherwise. 
*/
 int tb_drom_read(struct tb_switch *sw)
 {
 	u16 size;
-	u32 crc;
 	struct tb_drom_header *header;
-	int res;
+	int res, retries = 1;
+
 	if (sw->drom)
 		return 0;
@@ -576,6 +654,7 @@ int tb_drom_read(struct tb_switch *sw)
 	sw->drom = kzalloc(size, GFP_KERNEL);
 	if (!sw->drom)
 		return -ENOMEM;
+read:
 	res = tb_drom_read_n(sw, 0, sw->drom, size);
 	if (res)
 		goto err;
@@ -584,37 +663,41 @@ parse:
 	header = (void *) sw->drom;
 	if (header->data_len + TB_DROM_DATA_START != size) {
-		tb_sw_warn(sw, "drom size mismatch, aborting\n");
+		tb_sw_warn(sw, "drom size mismatch\n");
+		if (retries--) {
+			msleep(100);
+			goto read;
+		}
 		goto err;
 	}
-	crc = tb_crc8((u8 *) &header->uid, 8);
-	if (crc != header->uid_crc8) {
-		tb_sw_warn(sw,
-			"drom uid crc8 mismatch (expected: %#x, got: %#x), aborting\n",
-			header->uid_crc8, crc);
-		goto err;
+	tb_sw_dbg(sw, "DROM version: %d\n", header->device_rom_revision);
+
+	switch (header->device_rom_revision) {
+	case 3:
+		res = usb4_drom_parse(sw);
+		break;
+	default:
+		tb_sw_warn(sw, "DROM device_rom_revision %#x unknown\n",
+			   header->device_rom_revision);
+		fallthrough;
+	case 1:
+		res = tb_drom_parse(sw);
+		break;
 	}
-	if (!sw->uid)
-		sw->uid = header->uid;
-	sw->vendor = header->vendor_id;
-	sw->device = header->model_id;
-	crc = tb_crc32(sw->drom + TB_DROM_DATA_START, header->data_len);
-	if (crc != header->data_crc32) {
-		tb_sw_warn(sw,
-			"drom data crc32 mismatch (expected: %#x, got: %#x), continuing\n",
-			header->data_crc32, crc);
+	/* If the DROM parsing fails, wait a moment and retry once */
+	if (res == -EILSEQ && retries--) {
+		tb_sw_warn(sw, "parsing DROM failed\n");
+		msleep(100);
+		goto read;
 	}
-	if (header->device_rom_revision > 2)
-		tb_sw_warn(sw, "drom device_rom_revision %#x unknown\n",
-			header->device_rom_revision);
+	if (!res)
+		return 0;
-	return tb_drom_parse_entries(sw);
 err:
 	kfree(sw->drom);
 	sw->drom = NULL;
 	return -EIO;
-
 }
diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c
index 13e88109742e..86521ebb2579 100644
--- a/drivers/thunderbolt/icm.c
+++ b/drivers/thunderbolt/icm.c
@@ -49,6 +49,18 @@ module_param(start_icm, bool, 0444);
 MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)");
 /**
+ * struct usb4_switch_nvm_auth - Holds USB4 NVM_AUTH status
+ * @reply: Reply from ICM firmware is placed here
+ * @request: Request that is sent to ICM firmware
+ * @icm: Pointer to ICM private data
+ */
+struct usb4_switch_nvm_auth {
+	struct icm_usb4_switch_op_response reply;
+	struct icm_usb4_switch_op request;
+	struct icm *icm;
+};
+
+/**
 * struct icm - Internal connection manager private data
 * @request_lock: Makes sure only one message is sent to ICM at a time
 * @rescan_work: Work used to rescan the surviving switches after resume
@@ -61,6 +73,8 @@ MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: f
 * @max_boot_acl: Maximum number of preboot ACL entries (%0 if not supported)
 * @rpm: Does the controller support runtime PM (RTD3)
 * @can_upgrade_nvm: Can the NVM firmware be upgraded on this controller
+ * @proto_version: Firmware protocol version
+ * @last_nvm_auth: Last USB4 router NVM_AUTH result (or %NULL if not set)
 * @veto: Is RTD3 veto in effect
 * @is_supported: Checks if we can support ICM on this controller
 * @cio_reset: Trigger CIO reset
@@ -71,19 +85,21 @@ MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: f
 * @set_uuid: Set UUID for the root switch (optional)
 * @device_connected: Handle device connected ICM message
 * @device_disconnected: Handle device
disconnected ICM message - * @xdomain_connected - Handle XDomain connected ICM message - * @xdomain_disconnected - Handle XDomain disconnected ICM message + * @xdomain_connected: Handle XDomain connected ICM message + * @xdomain_disconnected: Handle XDomain disconnected ICM message * @rtd3_veto: Handle RTD3 veto notification ICM message */ struct icm { struct mutex request_lock; struct delayed_work rescan_work; struct pci_dev *upstream_port; - size_t max_boot_acl; int vnd_cap; bool safe_mode; + size_t max_boot_acl; bool rpm; bool can_upgrade_nvm; + u8 proto_version; + struct usb4_switch_nvm_auth *last_nvm_auth; bool veto; bool (*is_supported)(struct tb *tb); int (*cio_reset)(struct tb *tb); @@ -92,7 +108,7 @@ struct icm { void (*save_devices)(struct tb *tb); int (*driver_ready)(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm); + u8 *proto_version, size_t *nboot_acl, bool *rpm); void (*set_uuid)(struct tb *tb); void (*device_connected)(struct tb *tb, const struct icm_pkg_header *hdr); @@ -114,7 +130,7 @@ struct icm_notification { struct ep_name_entry { u8 len; u8 type; - u8 data[0]; + u8 data[]; }; #define EP_NAME_INTEL_VSS 0x10 @@ -437,7 +453,7 @@ static void icm_fr_save_devices(struct tb *tb) static int icm_fr_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct icm_fr_pkg_driver_ready_response reply; struct icm_pkg_driver_ready request = { @@ -541,7 +557,9 @@ static int icm_fr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, return 0; } -static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) +static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) { struct icm_fr_pkg_approve_xdomain_response reply; struct icm_fr_pkg_approve_xdomain request; @@ -552,10 +570,10 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) request.link_info = xd->depth << ICM_LINK_INFO_DEPTH_SHIFT | xd->link; memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); - request.transmit_path = xd->transmit_path; - request.transmit_ring = xd->transmit_ring; - request.receive_path = xd->receive_path; - request.receive_ring = xd->receive_ring; + request.transmit_path = transmit_path; + request.transmit_ring = transmit_ring; + request.receive_path = receive_path; + request.receive_ring = receive_ring; memset(&reply, 0, sizeof(reply)); ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), @@ -569,7 +587,9 @@ static int icm_fr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) return 0; } -static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) +static int icm_fr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) { u8 phy_port; u8 cmd; @@ -870,7 +890,13 @@ icm_fr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) return; } + pm_runtime_get_sync(sw->dev.parent); + remove_switch(sw); + + pm_runtime_mark_last_busy(sw->dev.parent); + pm_runtime_put_autosuspend(sw->dev.parent); + tb_switch_put(sw); } @@ -986,7 +1012,7 @@ static int icm_tr_cio_reset(struct tb *tb) static int icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct 
icm_tr_pkg_driver_ready_response reply; struct icm_pkg_driver_ready request = { @@ -1002,6 +1028,9 @@ icm_tr_driver_ready(struct tb *tb, enum tb_security_level *security_level, if (security_level) *security_level = reply.info & ICM_TR_INFO_SLEVEL_MASK; + if (proto_version) + *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> + ICM_TR_INFO_PROTO_VERSION_SHIFT; if (nboot_acl) *nboot_acl = (reply.info & ICM_TR_INFO_BOOT_ACL_MASK) >> ICM_TR_INFO_BOOT_ACL_SHIFT; @@ -1097,7 +1126,9 @@ static int icm_tr_challenge_switch_key(struct tb *tb, struct tb_switch *sw, return 0; } -static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) +static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) { struct icm_tr_pkg_approve_xdomain_response reply; struct icm_tr_pkg_approve_xdomain request; @@ -1107,10 +1138,10 @@ static int icm_tr_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) request.hdr.code = ICM_APPROVE_XDOMAIN; request.route_hi = upper_32_bits(xd->route); request.route_lo = lower_32_bits(xd->route); - request.transmit_path = xd->transmit_path; - request.transmit_ring = xd->transmit_ring; - request.receive_path = xd->receive_path; - request.receive_ring = xd->receive_ring; + request.transmit_path = transmit_path; + request.transmit_ring = transmit_ring; + request.receive_path = receive_path; + request.receive_ring = receive_ring; memcpy(&request.remote_uuid, xd->remote_uuid, sizeof(*xd->remote_uuid)); memset(&reply, 0, sizeof(reply)); @@ -1151,7 +1182,9 @@ static int icm_tr_xdomain_tear_down(struct tb *tb, struct tb_xdomain *xd, return 0; } -static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) +static int icm_tr_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) { int ret; @@ -1280,8 +1313,13 @@ icm_tr_device_disconnected(struct tb *tb, const struct icm_pkg_header *hdr) tb_warn(tb, "no switch exists at %llx, ignoring\n", route); return; } + pm_runtime_get_sync(sw->dev.parent); remove_switch(sw); + + pm_runtime_mark_last_busy(sw->dev.parent); + pm_runtime_put_autosuspend(sw->dev.parent); + tb_switch_put(sw); } @@ -1450,7 +1488,7 @@ static int icm_ar_get_mode(struct tb *tb) static int icm_ar_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct icm_ar_pkg_driver_ready_response reply; struct icm_pkg_driver_ready request = { @@ -1580,7 +1618,7 @@ static int icm_ar_set_boot_acl(struct tb *tb, const uuid_t *uuids, static int icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct icm_tr_pkg_driver_ready_response reply; struct icm_pkg_driver_ready request = { @@ -1594,6 +1632,10 @@ icm_icl_driver_ready(struct tb *tb, enum tb_security_level *security_level, if (ret) return ret; + if (proto_version) + *proto_version = (reply.info & ICM_TR_INFO_PROTO_VERSION_MASK) >> + ICM_TR_INFO_PROTO_VERSION_SHIFT; + /* Ice Lake always supports RTD3 */ if (rpm) *rpm = true; @@ -1633,6 +1675,22 @@ static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr) icm_veto_end(tb); } +static bool icm_tgl_is_supported(struct tb *tb) +{ + unsigned long end = jiffies + msecs_to_jiffies(10); + + do { + u32 val; + + val = ioread32(tb->nhi->iobase 
+ REG_FW_STS); + if (val & REG_FW_STS_NVM_AUTH_DONE) + return true; + usleep_range(100, 500); + } while (time_before(jiffies, end)); + + return false; +} + static void icm_handle_notification(struct work_struct *work) { struct icm_notification *n = container_of(work, typeof(*n), work); @@ -1655,10 +1713,12 @@ static void icm_handle_notification(struct work_struct *work) icm->device_disconnected(tb, n->pkg); break; case ICM_EVENT_XDOMAIN_CONNECTED: - icm->xdomain_connected(tb, n->pkg); + if (tb_is_xdomain_enabled()) + icm->xdomain_connected(tb, n->pkg); break; case ICM_EVENT_XDOMAIN_DISCONNECTED: - icm->xdomain_disconnected(tb, n->pkg); + if (tb_is_xdomain_enabled()) + icm->xdomain_disconnected(tb, n->pkg); break; case ICM_EVENT_RTD3_VETO: icm->rtd3_veto(tb, n->pkg); @@ -1681,8 +1741,13 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, if (!n) return; - INIT_WORK(&n->work, icm_handle_notification); n->pkg = kmemdup(buf, size, GFP_KERNEL); + if (!n->pkg) { + kfree(n); + return; + } + + INIT_WORK(&n->work, icm_handle_notification); n->tb = tb; queue_work(tb->wq, &n->work); @@ -1690,13 +1755,14 @@ static void icm_handle_event(struct tb *tb, enum tb_cfg_pkg_type type, static int __icm_driver_ready(struct tb *tb, enum tb_security_level *security_level, - size_t *nboot_acl, bool *rpm) + u8 *proto_version, size_t *nboot_acl, bool *rpm) { struct icm *icm = tb_priv(tb); unsigned int retries = 50; int ret; - ret = icm->driver_ready(tb, security_level, nboot_acl, rpm); + ret = icm->driver_ready(tb, security_level, proto_version, nboot_acl, + rpm); if (ret) { tb_err(tb, "failed to send driver ready to ICM\n"); return ret; @@ -1906,8 +1972,8 @@ static int icm_driver_ready(struct tb *tb) return 0; } - ret = __icm_driver_ready(tb, &tb->security_level, &tb->nboot_acl, - &icm->rpm); + ret = __icm_driver_ready(tb, &tb->security_level, &icm->proto_version, + &tb->nboot_acl, &icm->rpm); if (ret) return ret; @@ -1918,6 +1984,9 @@ static int icm_driver_ready(struct tb *tb) if (tb->nboot_acl > icm->max_boot_acl) tb->nboot_acl = 0; + if (icm->proto_version >= 3) + tb_dbg(tb, "USB4 proxy operations supported\n"); + return 0; } @@ -1964,7 +2033,9 @@ static int complete_rpm(struct device *dev, void *data) static void remove_unplugged_switch(struct tb_switch *sw) { - pm_runtime_get_sync(sw->dev.parent); + struct device *parent = get_device(sw->dev.parent); + + pm_runtime_get_sync(parent); /* * Signal this and switches below for rpm_complete because @@ -1975,8 +2046,10 @@ static void remove_unplugged_switch(struct tb_switch *sw) bus_for_each_dev(&tb_bus_type, &sw->dev, NULL, complete_rpm); tb_switch_remove(sw); - pm_runtime_mark_last_busy(sw->dev.parent); - pm_runtime_put_autosuspend(sw->dev.parent); + pm_runtime_mark_last_busy(parent); + pm_runtime_put_autosuspend(parent); + + put_device(parent); } static void icm_free_unplugged_children(struct tb_switch *sw) @@ -2029,7 +2102,7 @@ static void icm_complete(struct tb *tb) * Now all existing children should be resumed, start events * from ICM to get updated status. 
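icm_tgl_is_supported() above is the standard bounded-polling idiom: take a jiffies deadline up front, test the status bit, sleep briefly, and retry until the deadline passes. Reduced to a reusable skeleton (the register and bit are parameters here; the driver polls REG_FW_STS for REG_FW_STS_NVM_AUTH_DONE with a 10 ms budget):

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/jiffies.h>

static bool poll_bit_set(void __iomem *reg, u32 bit, unsigned int timeout_ms)
{
	unsigned long end = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (ioread32(reg) & bit)
			return true;
		/* a sleep range lets the timer code coalesce wakeups */
		usleep_range(100, 500);
	} while (time_before(jiffies, end));

	return false;
}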
*/ - __icm_driver_ready(tb, NULL, NULL, NULL); + __icm_driver_ready(tb, NULL, NULL, NULL, NULL); /* * We do not get notifications of devices that have been @@ -2108,6 +2181,8 @@ static void icm_stop(struct tb *tb) tb_switch_remove(tb->root_switch); tb->root_switch = NULL; nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DRV_UNLOADS, 0); + kfree(icm->last_nvm_auth); + icm->last_nvm_auth = NULL; } static int icm_disconnect_pcie_paths(struct tb *tb) @@ -2115,6 +2190,165 @@ static int icm_disconnect_pcie_paths(struct tb *tb) return nhi_mailbox_cmd(tb->nhi, NHI_MAILBOX_DISCONNECT_PCIE_PATHS, 0); } +static void icm_usb4_switch_nvm_auth_complete(void *data) +{ + struct usb4_switch_nvm_auth *auth = data; + struct icm *icm = auth->icm; + struct tb *tb = icm_to_tb(icm); + + tb_dbg(tb, "NVM_AUTH response for %llx flags %#x status %#x\n", + get_route(auth->reply.route_hi, auth->reply.route_lo), + auth->reply.hdr.flags, auth->reply.status); + + mutex_lock(&tb->lock); + if (WARN_ON(icm->last_nvm_auth)) + kfree(icm->last_nvm_auth); + icm->last_nvm_auth = auth; + mutex_unlock(&tb->lock); +} + +static int icm_usb4_switch_nvm_authenticate(struct tb *tb, u64 route) +{ + struct usb4_switch_nvm_auth *auth; + struct icm *icm = tb_priv(tb); + struct tb_cfg_request *req; + int ret; + + auth = kzalloc(sizeof(*auth), GFP_KERNEL); + if (!auth) + return -ENOMEM; + + auth->icm = icm; + auth->request.hdr.code = ICM_USB4_SWITCH_OP; + auth->request.route_hi = upper_32_bits(route); + auth->request.route_lo = lower_32_bits(route); + auth->request.opcode = USB4_SWITCH_OP_NVM_AUTH; + + req = tb_cfg_request_alloc(); + if (!req) { + ret = -ENOMEM; + goto err_free_auth; + } + + req->match = icm_match; + req->copy = icm_copy; + req->request = &auth->request; + req->request_size = sizeof(auth->request); + req->request_type = TB_CFG_PKG_ICM_CMD; + req->response = &auth->reply; + req->npackets = 1; + req->response_size = sizeof(auth->reply); + req->response_type = TB_CFG_PKG_ICM_RESP; + + tb_dbg(tb, "NVM_AUTH request for %llx\n", route); + + mutex_lock(&icm->request_lock); + ret = tb_cfg_request(tb->ctl, req, icm_usb4_switch_nvm_auth_complete, + auth); + mutex_unlock(&icm->request_lock); + + tb_cfg_request_put(req); + if (ret) + goto err_free_auth; + return 0; + +err_free_auth: + kfree(auth); + return ret; +} + +static int icm_usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, + u8 *status, const void *tx_data, size_t tx_data_len, + void *rx_data, size_t rx_data_len) +{ + struct icm_usb4_switch_op_response reply; + struct icm_usb4_switch_op request; + struct tb *tb = sw->tb; + struct icm *icm = tb_priv(tb); + u64 route = tb_route(sw); + int ret; + + /* + * USB4 router operation proxy is supported in firmware if the + * protocol version is 3 or higher. + */ + if (icm->proto_version < 3) + return -EOPNOTSUPP; + + /* + * NVM_AUTH is a special USB4 proxy operation that does not + * return immediately so handle it separately. 
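Because the firmware answers NVM_AUTH asynchronously, the request is sent with a completion callback that parks the reply in icm->last_nvm_auth under the domain lock, and a later status query consumes it. The hand-off pattern, stripped of the ICM specifics (all names below are illustrative, not driver API):

#include <linux/mutex.h>
#include <linux/slab.h>

struct async_reply {
	u64 route;	/* which router this reply belongs to */
	u32 status;
};

struct reply_box {
	struct mutex lock;
	struct async_reply *last;	/* NULL when nothing is parked */
};

/* Completion side: park the new reply, dropping any stale one. */
static void park_reply(struct reply_box *box, struct async_reply *reply)
{
	mutex_lock(&box->lock);
	kfree(box->last);
	box->last = reply;
	mutex_unlock(&box->lock);
}

/* Consumer side, called with box->lock held: claim and clear. */
static void claim_reply(struct reply_box *box, u64 route, u32 *status)
{
	struct async_reply *reply = box->last;

	box->last = NULL;
	if (reply && reply->route == route)
		*status = reply->status;
	else
		*status = 0;	/* nothing pending for this router */

	kfree(reply);
}

The consume-and-clear step is what keeps a reply parked for one router from being misattributed to the next NVM_AUTH issued for a different one.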
+ */ + if (opcode == USB4_SWITCH_OP_NVM_AUTH) + return icm_usb4_switch_nvm_authenticate(tb, route); + + memset(&request, 0, sizeof(request)); + request.hdr.code = ICM_USB4_SWITCH_OP; + request.route_hi = upper_32_bits(route); + request.route_lo = lower_32_bits(route); + request.opcode = opcode; + if (metadata) + request.metadata = *metadata; + + if (tx_data_len) { + request.data_len_valid |= ICM_USB4_SWITCH_DATA_VALID; + if (tx_data_len < ARRAY_SIZE(request.data)) + request.data_len_valid = + tx_data_len & ICM_USB4_SWITCH_DATA_LEN_MASK; + memcpy(request.data, tx_data, tx_data_len * sizeof(u32)); + } + + memset(&reply, 0, sizeof(reply)); + ret = icm_request(tb, &request, sizeof(request), &reply, sizeof(reply), + 1, ICM_TIMEOUT); + if (ret) + return ret; + + if (reply.hdr.flags & ICM_FLAGS_ERROR) + return -EIO; + + if (status) + *status = reply.status; + + if (metadata) + *metadata = reply.metadata; + + if (rx_data_len) + memcpy(rx_data, reply.data, rx_data_len * sizeof(u32)); + + return 0; +} + +static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw, + u32 *status) +{ + struct usb4_switch_nvm_auth *auth; + struct tb *tb = sw->tb; + struct icm *icm = tb_priv(tb); + int ret = 0; + + if (icm->proto_version < 3) + return -EOPNOTSUPP; + + auth = icm->last_nvm_auth; + icm->last_nvm_auth = NULL; + + if (auth && auth->reply.route_hi == sw->config.route_hi && + auth->reply.route_lo == sw->config.route_lo) { + tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n", + tb_route(sw), auth->reply.hdr.flags, auth->reply.status); + if (auth->reply.hdr.flags & ICM_FLAGS_ERROR) + ret = -EIO; + else + *status = auth->reply.status; + } else { + *status = 0; + } + + kfree(auth); + return ret; +} + /* Falcon Ridge */ static const struct tb_cm_ops icm_fr_ops = { .driver_ready = icm_driver_ready, @@ -2173,6 +2407,9 @@ static const struct tb_cm_ops icm_tr_ops = { .disconnect_pcie_paths = icm_disconnect_pcie_paths, .approve_xdomain_paths = icm_tr_approve_xdomain_paths, .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, + .usb4_switch_op = icm_usb4_switch_op, + .usb4_switch_nvm_authenticate_status = + icm_usb4_switch_nvm_authenticate_status, }; /* Ice Lake */ @@ -2186,6 +2423,9 @@ static const struct tb_cm_ops icm_icl_ops = { .handle_event = icm_handle_event, .approve_xdomain_paths = icm_tr_approve_xdomain_paths, .disconnect_xdomain_paths = icm_tr_disconnect_xdomain_paths, + .usb4_switch_op = icm_usb4_switch_op, + .usb4_switch_nvm_authenticate_status = + icm_usb4_switch_nvm_authenticate_status, }; struct tb *icm_probe(struct tb_nhi *nhi) @@ -2193,7 +2433,7 @@ struct tb *icm_probe(struct tb_nhi *nhi) struct icm *icm; struct tb *tb; - tb = tb_domain_alloc(nhi, sizeof(struct icm)); + tb = tb_domain_alloc(nhi, ICM_TIMEOUT, sizeof(struct icm)); if (!tb) return NULL; @@ -2269,6 +2509,40 @@ struct tb *icm_probe(struct tb_nhi *nhi) icm->rtd3_veto = icm_icl_rtd3_veto; tb->cm_ops = &icm_icl_ops; break; + + case PCI_DEVICE_ID_INTEL_TGL_NHI0: + case PCI_DEVICE_ID_INTEL_TGL_NHI1: + case PCI_DEVICE_ID_INTEL_TGL_H_NHI0: + case PCI_DEVICE_ID_INTEL_TGL_H_NHI1: + case PCI_DEVICE_ID_INTEL_ADL_NHI0: + case PCI_DEVICE_ID_INTEL_ADL_NHI1: + case PCI_DEVICE_ID_INTEL_RPL_NHI0: + case PCI_DEVICE_ID_INTEL_RPL_NHI1: + case PCI_DEVICE_ID_INTEL_MTL_M_NHI0: + case PCI_DEVICE_ID_INTEL_MTL_P_NHI0: + case PCI_DEVICE_ID_INTEL_MTL_P_NHI1: + icm->is_supported = icm_tgl_is_supported; + icm->driver_ready = icm_icl_driver_ready; + icm->set_uuid = icm_icl_set_uuid; + icm->device_connected = icm_icl_device_connected; + 
icm->device_disconnected = icm_tr_device_disconnected; + icm->xdomain_connected = icm_tr_xdomain_connected; + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; + icm->rtd3_veto = icm_icl_rtd3_veto; + tb->cm_ops = &icm_icl_ops; + break; + + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI: + icm->is_supported = icm_tgl_is_supported; + icm->get_mode = icm_ar_get_mode; + icm->driver_ready = icm_tr_driver_ready; + icm->device_connected = icm_tr_device_connected; + icm->device_disconnected = icm_tr_device_disconnected; + icm->xdomain_connected = icm_tr_xdomain_connected; + icm->xdomain_disconnected = icm_tr_xdomain_disconnected; + tb->cm_ops = &icm_tr_ops; + break; } if (!icm->is_supported || !icm->is_supported(tb)) { @@ -2277,5 +2551,7 @@ struct tb *icm_probe(struct tb_nhi *nhi) return NULL; } + tb_dbg(tb, "using firmware connection manager\n"); + return tb; } diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c index bd44d50246d2..633970fbe9b0 100644 --- a/drivers/thunderbolt/lc.c +++ b/drivers/thunderbolt/lc.c @@ -45,7 +45,7 @@ static int find_port_lc_cap(struct tb_port *port) return sw->cap_lc + start + phys * size; } -static int tb_lc_configure_lane(struct tb_port *port, bool configure) +static int tb_lc_set_port_configured(struct tb_port *port, bool configured) { bool upstream = tb_is_upstream_port(port); struct tb_switch *sw = port->sw; @@ -69,7 +69,7 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure) else lane = TB_LC_SX_CTRL_L2C; - if (configure) { + if (configured) { ctrl |= lane; if (upstream) ctrl |= TB_LC_SX_CTRL_UPSTREAM; @@ -83,55 +83,317 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure) } /** - * tb_lc_configure_link() - Let LC know about configured link - * @sw: Switch that is being added + * tb_lc_configure_port() - Let LC know about configured port + * @port: Port that is set as configured * - * Informs LC of both parent switch and @sw that there is established - * link between the two. + * Sets the port configured for power management purposes. */ -int tb_lc_configure_link(struct tb_switch *sw) +int tb_lc_configure_port(struct tb_port *port) { - struct tb_port *up, *down; - int ret; + return tb_lc_set_port_configured(port, true); +} + +/** + * tb_lc_unconfigure_port() - Let LC know about unconfigured port + * @port: Port that is set as configured + * + * Sets the port unconfigured for power management purposes. + */ +void tb_lc_unconfigure_port(struct tb_port *port) +{ + tb_lc_set_port_configured(port, false); +} + +static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure) +{ + struct tb_switch *sw = port->sw; + u32 ctrl, lane; + int cap, ret; - if (!tb_route(sw) || tb_switch_is_icm(sw)) + if (sw->generation < 2) return 0; - up = tb_upstream_port(sw); - down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent)); + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; + + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); + if (ret) + return ret; + + /* Resolve correct lane */ + if (port->port % 2) + lane = TB_LC_SX_CTRL_L1D; + else + lane = TB_LC_SX_CTRL_L2D; + + if (configure) + ctrl |= lane; + else + ctrl &= ~lane; + + return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); +} + +/** + * tb_lc_configure_xdomain() - Inform LC that the link is XDomain + * @port: Switch downstream port connected to another host + * + * Sets the lane configured for XDomain accordingly so that the LC knows + * about this. 
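Both tb_lc_set_port_configured() and tb_lc_set_xdomain_configured() are read-modify-write updates of the per-port TB_LC_SX_CTRL register and differ only in which lane bits they touch. A hypothetical common helper shows the shape (the driver keeps the two open coded; tb_sw_read()/tb_sw_write() are its config space accessors):

static int tb_lc_update_ctrl(struct tb_switch *sw, int cap, u32 set, u32 clear)
{
	u32 ctrl;
	int ret;

	ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
	if (ret)
		return ret;

	ctrl &= ~clear;
	ctrl |= set;

	return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1);
}

Configuring a lane for XDomain would then be tb_lc_update_ctrl(sw, cap, lane, 0) and unconfiguring it tb_lc_update_ctrl(sw, cap, 0, lane), with lane being TB_LC_SX_CTRL_L1D or TB_LC_SX_CTRL_L2D depending on the parity of port->port.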
Returns %0 in success and negative errno in failure. + */ +int tb_lc_configure_xdomain(struct tb_port *port) +{ + return tb_lc_set_xdomain_configured(port, true); +} + +/** + * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port + * @port: Switch downstream port that was connected to another host + * + * Unsets the lane XDomain configuration. + */ +void tb_lc_unconfigure_xdomain(struct tb_port *port) +{ + tb_lc_set_xdomain_configured(port, false); +} + +/** + * tb_lc_start_lane_initialization() - Start lane initialization + * @port: Device router lane 0 adapter + * + * Starts lane initialization for @port after the router resumed from + * sleep. Should be called for those downstream lane adapters that were + * not connected (tb_lc_configure_port() was not called) before sleep. + * + * Returns %0 in success and negative errno in case of failure. + */ +int tb_lc_start_lane_initialization(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int ret, cap; + u32 ctrl; - /* Configure parent link toward this switch */ - ret = tb_lc_configure_lane(down, true); + if (!tb_route(sw)) + return 0; + + if (sw->generation < 2) + return 0; + + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; + + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); if (ret) return ret; - /* Configure upstream link from this switch to the parent */ - ret = tb_lc_configure_lane(up, true); + ctrl |= TB_LC_SX_CTRL_SLI; + + return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); +} + +/** + * tb_lc_is_clx_supported() - Check whether CLx is supported by the lane adapter + * @port: Lane adapter + * + * TB_LC_LINK_ATTR_CPS bit reflects if the link supports CLx including + * active cables (if connected on the link). + */ +bool tb_lc_is_clx_supported(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int cap, ret; + u32 val; + + cap = find_port_lc_cap(port); + if (cap < 0) + return false; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_ATTR, 1); if (ret) - tb_lc_configure_lane(down, false); + return false; - return ret; + return !!(val & TB_LC_LINK_ATTR_CPS); } /** - * tb_lc_unconfigure_link() - Let LC know about unconfigured link - * @sw: Switch to unconfigure + * tb_lc_is_usb_plugged() - Is there USB device connected to port + * @port: Device router lane 0 adapter * - * Informs LC of both parent switch and @sw that the link between the - * two does not exist anymore. + * Returns true if the @port has USB type-C device connected. */ -void tb_lc_unconfigure_link(struct tb_switch *sw) +bool tb_lc_is_usb_plugged(struct tb_port *port) { - struct tb_port *up, *down; + struct tb_switch *sw = port->sw; + int cap, ret; + u32 val; - if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw)) - return; + if (sw->generation != 3) + return false; - up = tb_upstream_port(sw); - down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent)); + cap = find_port_lc_cap(port); + if (cap < 0) + return false; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_CS_42, 1); + if (ret) + return false; + + return !!(val & TB_LC_CS_42_USB_PLUGGED); +} + +/** + * tb_lc_is_xhci_connected() - Is the internal xHCI connected + * @port: Device router lane 0 adapter + * + * Returns true if the internal xHCI has been connected to @port. 
+ */ +bool tb_lc_is_xhci_connected(struct tb_port *port) +{ + struct tb_switch *sw = port->sw; + int cap, ret; + u32 val; + + if (sw->generation != 3) + return false; + + cap = find_port_lc_cap(port); + if (cap < 0) + return false; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1); + if (ret) + return false; + + return !!(val & TB_LC_LINK_REQ_XHCI_CONNECT); +} + +static int __tb_lc_xhci_connect(struct tb_port *port, bool connect) +{ + struct tb_switch *sw = port->sw; + int cap, ret; + u32 val; + + if (sw->generation != 3) + return -EINVAL; + + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1); + if (ret) + return ret; + + if (connect) + val |= TB_LC_LINK_REQ_XHCI_CONNECT; + else + val &= ~TB_LC_LINK_REQ_XHCI_CONNECT; + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, cap + TB_LC_LINK_REQ, 1); +} + +/** + * tb_lc_xhci_connect() - Connect internal xHCI + * @port: Device router lane 0 adapter + * + * Tells LC to connect the internal xHCI to @port. Returns %0 on success + * and negative errno in case of failure. Can be called for Thunderbolt 3 + * routers only. + */ +int tb_lc_xhci_connect(struct tb_port *port) +{ + int ret; + + ret = __tb_lc_xhci_connect(port, true); + if (ret) + return ret; + + tb_port_dbg(port, "xHCI connected\n"); + return 0; +} + +/** + * tb_lc_xhci_disconnect() - Disconnect internal xHCI + * @port: Device router lane 0 adapter + * + * Tells LC to disconnect the internal xHCI from @port. Can be called + * for Thunderbolt 3 routers only. + */ +void tb_lc_xhci_disconnect(struct tb_port *port) +{ + __tb_lc_xhci_connect(port, false); + tb_port_dbg(port, "xHCI disconnected\n"); +} - tb_lc_configure_lane(up, false); - tb_lc_configure_lane(down, false); +static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset, + unsigned int flags) +{ + u32 ctrl; + int ret; + + /* + * Enable wake on PCIe and USB4 (wake coming from another + * router). + */ + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, + offset + TB_LC_SX_CTRL, 1); + if (ret) + return ret; + + ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WODPC | + TB_LC_SX_CTRL_WODPD | TB_LC_SX_CTRL_WOP | TB_LC_SX_CTRL_WOU4); + + if (flags & TB_WAKE_ON_CONNECT) + ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD; + if (flags & TB_WAKE_ON_USB4) + ctrl |= TB_LC_SX_CTRL_WOU4; + if (flags & TB_WAKE_ON_PCIE) + ctrl |= TB_LC_SX_CTRL_WOP; + if (flags & TB_WAKE_ON_DP) + ctrl |= TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD; + + return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1); +} + +/** + * tb_lc_set_wake() - Enable/disable wake + * @sw: Switch whose wakes to configure + * @flags: Wakeup flags (%0 to disable) + * + * For each LC sets wake bits accordingly. 
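tb_lc_set_wake_one() first clears every wake bit and then ORs back only the requested ones, which is why disabling wakes is simply a call with flags == 0. The flag translation could equally be table driven; a sketch with the same masks and a hypothetical helper:

static const struct {
	unsigned int flag;	/* TB_WAKE_ON_* */
	u32 bits;		/* TB_LC_SX_CTRL_* wake bits */
} lc_wake_map[] = {
	{ TB_WAKE_ON_CONNECT, TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD },
	{ TB_WAKE_ON_USB4,    TB_LC_SX_CTRL_WOU4 },
	{ TB_WAKE_ON_PCIE,    TB_LC_SX_CTRL_WOP },
	{ TB_WAKE_ON_DP,      TB_LC_SX_CTRL_WODPC | TB_LC_SX_CTRL_WODPD },
};

static u32 lc_wake_bits(unsigned int flags)
{
	u32 bits = 0;
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(lc_wake_map); i++)
		if (flags & lc_wake_map[i].flag)
			bits |= lc_wake_map[i].bits;

	return bits;
}

The function body that follows decodes the LC descriptor once (number of link controllers, start offset, per-controller register block size) and applies the same bits at sw->cap_lc + start + i * size for each controller i.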
+ */ +int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags) +{ + int start, size, nlc, ret, i; + u32 desc; + + if (sw->generation < 2) + return 0; + + if (!tb_route(sw)) + return 0; + + ret = read_lc_desc(sw, &desc); + if (ret) + return ret; + + /* Figure out number of link controllers */ + nlc = desc & TB_LC_DESC_NLC_MASK; + start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT; + size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT; + + /* For each link controller set sleep bit */ + for (i = 0; i < nlc; i++) { + unsigned int offset = sw->cap_lc + start + i * size; + + ret = tb_lc_set_wake_one(sw, offset, flags); + if (ret) + return ret; + } + + return 0; } /** @@ -366,3 +628,17 @@ int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in) tb_port_dbg(in, "sink %d de-allocated\n", sink); return 0; } + +/** + * tb_lc_force_power() - Forces LC to be powered on + * @sw: Thunderbolt switch + * + * This is useful to let authentication cycle pass even without + * a Thunderbolt link present. + */ +int tb_lc_force_power(struct tb_switch *sw) +{ + u32 in = 0xffff; + + return tb_sw_write(sw, &in, TB_CFG_SWITCH, TB_LC_POWER, 1); +} diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 1be491ecbb45..4dce2edd86ea 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -13,10 +13,13 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/pci.h> +#include <linux/dma-mapping.h> #include <linux/interrupt.h> +#include <linux/iommu.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/property.h> +#include <linux/string_helpers.h> #include "nhi.h" #include "nhi_regs.h" @@ -24,13 +27,12 @@ #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") +#define RING_FIRST_USABLE_HOPID 1 /* - * Used to enable end-to-end workaround for missing RX packets. Do not - * use this ring for anything else. + * Used with QUIRK_E2E to specify an unused HopID the Rx credits are + * transferred. */ -#define RING_E2E_UNUSED_HOPID 2 -#define RING_FIRST_USABLE_HOPID TB_PATH_MIN_HOPID - +#define RING_E2E_RESERVED_HOPID RING_FIRST_USABLE_HOPID /* * Minimal number of vectors when we use MSI-X. Two for control channel * Rx/Tx and the rest four are for cross domain DMA paths. @@ -40,6 +42,10 @@ #define NHI_MAILBOX_TIMEOUT 500 /* ms */ +/* Host interface quirks */ +#define QUIRK_AUTO_CLEAR_INT BIT(0) +#define QUIRK_E2E BIT(1) + static int ring_interrupt_index(struct tb_ring *ring) { int bit = ring->hop; @@ -48,7 +54,7 @@ static int ring_interrupt_index(struct tb_ring *ring) return bit; } -/** +/* * ring_interrupt_active() - activate/deactivate interrupts for a single ring * * ring->nhi->lock must be held. @@ -71,14 +77,17 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active) else index = ring->hop + ring->nhi->hop_count; - /* - * Ask the hardware to clear interrupt status bits automatically - * since we already know which interrupt was triggered. - */ - misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); - if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) { - misc |= REG_DMA_MISC_INT_AUTO_CLEAR; - iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC); + if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) { + /* + * Ask the hardware to clear interrupt status + * bits automatically since we already know + * which interrupt was triggered. 
+ */ + misc = ioread32(ring->nhi->iobase + REG_DMA_MISC); + if (!(misc & REG_DMA_MISC_INT_AUTO_CLEAR)) { + misc |= REG_DMA_MISC_INT_AUTO_CLEAR; + iowrite32(misc, ring->nhi->iobase + REG_DMA_MISC); + } } ivr_base = ring->nhi->iobase + REG_INT_VEC_ALLOC_BASE; @@ -109,7 +118,7 @@ static void ring_interrupt_active(struct tb_ring *ring, bool active) iowrite32(new, ring->nhi->iobase + reg); } -/** +/* * nhi_disable_interrupts() - disable interrupts for all rings * * Use only during init and shutdown. @@ -186,7 +195,7 @@ static bool ring_empty(struct tb_ring *ring) return ring->head == ring->tail; } -/** +/* * ring_write_descriptors() - post frames from ring->queue to the controller * * ring->lock is held. @@ -216,7 +225,7 @@ static void ring_write_descriptors(struct tb_ring *ring) } } -/** +/* * ring_work() - progress completed frames * * If the ring is shutting down then all frames are marked as canceled and @@ -382,11 +391,24 @@ void tb_ring_poll_complete(struct tb_ring *ring) } EXPORT_SYMBOL_GPL(tb_ring_poll_complete); +static void ring_clear_msix(const struct tb_ring *ring) +{ + if (ring->nhi->quirks & QUIRK_AUTO_CLEAR_INT) + return; + + if (ring->is_tx) + ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE); + else + ioread32(ring->nhi->iobase + REG_RING_NOTIFY_BASE + + 4 * (ring->nhi->hop_count / 32)); +} + static irqreturn_t ring_msix(int irq, void *data) { struct tb_ring *ring = data; spin_lock(&ring->nhi->lock); + ring_clear_msix(ring); spin_lock(&ring->lock); __ring_interrupt(ring); spin_unlock(&ring->lock); @@ -410,12 +432,23 @@ static int ring_request_msix(struct tb_ring *ring, bool no_suspend) ring->vector = ret; - ring->irq = pci_irq_vector(ring->nhi->pdev, ring->vector); - if (ring->irq < 0) - return ring->irq; + ret = pci_irq_vector(ring->nhi->pdev, ring->vector); + if (ret < 0) + goto err_ida_remove; + + ring->irq = ret; irqflags = no_suspend ? IRQF_NO_SUSPEND : 0; - return request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); + ret = request_irq(ring->irq, ring_msix, irqflags, "thunderbolt", ring); + if (ret) + goto err_ida_remove; + + return 0; + +err_ida_remove: + ida_simple_remove(&nhi->msix_ida, ring->vector); + + return ret; } static void ring_release_msix(struct tb_ring *ring) @@ -431,8 +464,18 @@ static void ring_release_msix(struct tb_ring *ring) static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) { + unsigned int start_hop = RING_FIRST_USABLE_HOPID; int ret = 0; + if (nhi->quirks & QUIRK_E2E) { + start_hop = RING_FIRST_USABLE_HOPID + 1; + if (ring->flags & RING_FLAG_E2E && !ring->is_tx) { + dev_dbg(&nhi->pdev->dev, "quirking E2E TX HopID %u -> %u\n", + ring->e2e_tx_hop, RING_E2E_RESERVED_HOPID); + ring->e2e_tx_hop = RING_E2E_RESERVED_HOPID; + } + } + spin_lock_irq(&nhi->lock); if (ring->hop < 0) { @@ -440,9 +483,9 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) /* * Automatically allocate HopID from the non-reserved - * range 8 .. hop_count - 1. + * range 1 .. hop_count - 1. 
*/ - for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) { + for (i = start_hop; i < nhi->hop_count; i++) { if (ring->is_tx) { if (!nhi->tx_rings[i]) { ring->hop = i; @@ -457,6 +500,11 @@ static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring) } } + if (ring->hop > 0 && ring->hop < start_hop) { + dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); + ret = -EINVAL; + goto err_unlock; + } if (ring->hop < 0 || ring->hop >= nhi->hop_count) { dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop); ret = -EINVAL; @@ -487,7 +535,7 @@ err_unlock: static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, bool transmit, unsigned int flags, - u16 sof_mask, u16 eof_mask, + int e2e_tx_hop, u16 sof_mask, u16 eof_mask, void (*start_poll)(void *), void *poll_data) { @@ -496,10 +544,6 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, dev_dbg(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", transmit ? "TX" : "RX", hop, size); - /* Tx Ring 2 is reserved for E2E workaround */ - if (transmit && hop == RING_E2E_UNUSED_HOPID) - return NULL; - ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) return NULL; @@ -514,6 +558,7 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size, ring->is_tx = transmit; ring->size = size; ring->flags = flags; + ring->e2e_tx_hop = e2e_tx_hop; ring->sof_mask = sof_mask; ring->eof_mask = eof_mask; ring->head = 0; @@ -558,7 +603,7 @@ err_free_ring: struct tb_ring *tb_ring_alloc_tx(struct tb_nhi *nhi, int hop, int size, unsigned int flags) { - return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, NULL, NULL); + return tb_ring_alloc(nhi, hop, size, true, flags, 0, 0, 0, NULL, NULL); } EXPORT_SYMBOL_GPL(tb_ring_alloc_tx); @@ -568,6 +613,7 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx); * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation. * @size: Number of entries in the ring * @flags: Flags for the ring + * @e2e_tx_hop: Transmit HopID when E2E is enabled in @flags * @sof_mask: Mask of PDF values that start a frame * @eof_mask: Mask of PDF values that end a frame * @start_poll: If not %NULL the ring will call this function when an @@ -576,16 +622,18 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx); * @poll_data: Optional data passed to @start_poll */ struct tb_ring *tb_ring_alloc_rx(struct tb_nhi *nhi, int hop, int size, - unsigned int flags, u16 sof_mask, u16 eof_mask, + unsigned int flags, int e2e_tx_hop, + u16 sof_mask, u16 eof_mask, void (*start_poll)(void *), void *poll_data) { - return tb_ring_alloc(nhi, hop, size, false, flags, sof_mask, eof_mask, + return tb_ring_alloc(nhi, hop, size, false, flags, e2e_tx_hop, sof_mask, eof_mask, start_poll, poll_data); } EXPORT_SYMBOL_GPL(tb_ring_alloc_rx); /** * tb_ring_start() - enable a ring + * @ring: Ring to start * * Must not be invoked in parallel with tb_ring_stop(). */ @@ -614,19 +662,6 @@ void tb_ring_start(struct tb_ring *ring) flags = RING_FLAG_ENABLE | RING_FLAG_RAW; } - if (ring->flags & RING_FLAG_E2E && !ring->is_tx) { - u32 hop; - - /* - * In order not to lose Rx packets we enable end-to-end - * workaround which transfers Rx credits to an unused Tx - * HopID. 
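With the hard-coded workaround gone, end-to-end flow control is something the ring's client now requests explicitly, by passing RING_FLAG_E2E together with the Tx HopID against which the Rx credits are granted. A hypothetical caller (ring size, HopID and PDF masks are made up; the in-tree users are the DMA test driver and thunderbolt-net):

static int my_start_rx(struct tb_nhi *nhi)
{
	u16 sof_mask = BIT(1), eof_mask = BIT(2);	/* made-up frame PDFs */
	struct tb_ring *ring;

	ring = tb_ring_alloc_rx(nhi, -1 /* auto-allocate HopID */, 256,
				RING_FLAG_FRAME | RING_FLAG_E2E,
				8 /* e2e_tx_hop */, sof_mask, eof_mask,
				NULL, NULL);
	if (!ring)
		return -ENOMEM;

	tb_ring_start(ring);
	return 0;
}

On hosts with QUIRK_E2E set (Falcon Ridge), nhi_alloc_hop() quietly rewrites e2e_tx_hop to the reserved HopID, so callers do not need to know about that hardware workaround.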
- */ - hop = RING_E2E_UNUSED_HOPID << REG_RX_OPTIONS_E2E_HOP_SHIFT; - hop &= REG_RX_OPTIONS_E2E_HOP_MASK; - flags |= hop | RING_FLAG_E2E_FLOW_CONTROL; - } - ring_iowrite64desc(ring, ring->descriptors_dma, 0); if (ring->is_tx) { ring_iowrite32desc(ring, ring->size, 12); @@ -639,6 +674,31 @@ void tb_ring_start(struct tb_ring *ring) ring_iowrite32options(ring, sof_eof_mask, 4); ring_iowrite32options(ring, flags, 0); } + + /* + * Now that the ring valid bit is set we can configure E2E if + * enabled for the ring. + */ + if (ring->flags & RING_FLAG_E2E) { + if (!ring->is_tx) { + u32 hop; + + hop = ring->e2e_tx_hop << REG_RX_OPTIONS_E2E_HOP_SHIFT; + hop &= REG_RX_OPTIONS_E2E_HOP_MASK; + flags |= hop; + + dev_dbg(&ring->nhi->pdev->dev, + "enabling E2E for %s %d with TX HopID %d\n", + RING_TYPE(ring), ring->hop, ring->e2e_tx_hop); + } else { + dev_dbg(&ring->nhi->pdev->dev, "enabling E2E for %s %d\n", + RING_TYPE(ring), ring->hop); + } + + flags |= RING_FLAG_E2E_FLOW_CONTROL; + ring_iowrite32options(ring, flags, 0); + } + ring_interrupt_active(ring, true); ring->running = true; err: @@ -649,6 +709,7 @@ EXPORT_SYMBOL_GPL(tb_ring_start); /** * tb_ring_stop() - shutdown a ring + * @ring: Ring to stop * * Must not be invoked from a callback. * @@ -736,7 +797,7 @@ void tb_ring_free(struct tb_ring *ring) dev_dbg(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring), ring->hop); - /** + /* * ring->work can no longer be scheduled (it is scheduled only * by nhi_interrupt_work, ring_stop and ring_msix). Wait for it * to finish before freeing the ring. @@ -885,6 +946,22 @@ static int nhi_suspend_noirq(struct device *dev) return __nhi_suspend_noirq(dev, device_may_wakeup(dev)); } +static int nhi_freeze_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + + return tb_domain_freeze_noirq(tb); +} + +static int nhi_thaw_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + + return tb_domain_thaw_noirq(tb); +} + static bool nhi_wake_supported(struct pci_dev *pdev) { u8 val; @@ -1039,9 +1116,75 @@ static void nhi_shutdown(struct tb_nhi *nhi) nhi->ops->shutdown(nhi); } +static void nhi_check_quirks(struct tb_nhi *nhi) +{ + if (nhi->pdev->vendor == PCI_VENDOR_ID_INTEL) { + /* + * Intel hardware supports auto clear of the interrupt + * status register right after interrupt is being + * issued. + */ + nhi->quirks |= QUIRK_AUTO_CLEAR_INT; + + switch (nhi->pdev->device) { + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: + /* + * Falcon Ridge controller needs the end-to-end + * flow control workaround to avoid losing Rx + * packets when RING_FLAG_E2E is set. + */ + nhi->quirks |= QUIRK_E2E; + break; + } + } +} + +static int nhi_check_iommu_pdev(struct pci_dev *pdev, void *data) +{ + if (!pdev->external_facing || + !device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION)) + return 0; + *(bool *)data = true; + return 1; /* Stop walking */ +} + +static void nhi_check_iommu(struct tb_nhi *nhi) +{ + struct pci_bus *bus = nhi->pdev->bus; + bool port_ok = false; + + /* + * Ideally what we'd do here is grab every PCI device that + * represents a tunnelling adapter for this NHI and check their + * status directly, but unfortunately USB4 seems to make it + * obnoxiously difficult to reliably make any correlation. + * + * So for now we'll have to bodge it... 
Hoping that the system + * is at least sane enough that an adapter is in the same PCI + * segment as its NHI, if we can find *something* on that segment + * which meets the requirements for Kernel DMA Protection, we'll + * take that to imply that firmware is aware and has (hopefully) + * done the right thing in general. We need to know that the PCI + * layer has seen the ExternalFacingPort property which will then + * inform the IOMMU layer to enforce the complete "untrusted DMA" + * flow, but also that the IOMMU driver itself can be trusted not + * to have been subverted by a pre-boot DMA attack. + */ + while (bus->parent) + bus = bus->parent; + + pci_walk_bus(bus, nhi_check_iommu_pdev, &port_ok); + + nhi->iommu_dma_protection = port_ok; + dev_dbg(&nhi->pdev->dev, "IOMMU DMA protection is %s\n", + str_enabled_disabled(port_ok)); +} + static int nhi_init_msi(struct tb_nhi *nhi) { struct pci_dev *pdev = nhi->pdev; + struct device *dev = &pdev->dev; int res, irq, nvec; /* In case someone left them on. */ @@ -1072,10 +1215,8 @@ static int nhi_init_msi(struct tb_nhi *nhi) res = devm_request_irq(&pdev->dev, irq, nhi_msi, IRQF_NO_SUSPEND, "thunderbolt", nhi); - if (res) { - dev_err(&pdev->dev, "request_irq failed, aborting\n"); - return res; - } + if (res) + return dev_err_probe(dev, res, "request_irq failed, aborting\n"); } return 0; @@ -1091,28 +1232,46 @@ static bool nhi_imr_valid(struct pci_dev *pdev) return true; } +static struct tb *nhi_select_cm(struct tb_nhi *nhi) +{ + struct tb *tb; + + /* + * USB4 case is simple. If we got control of any of the + * capabilities, we use software CM. + */ + if (tb_acpi_is_native()) + return tb_probe(nhi); + + /* + * Either firmware based CM is running (we did not get control + * from the firmware) or this is pre-USB4 PC so try first + * firmware CM and then fallback to software CM. 
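nhi_check_iommu() leans on the pci_walk_bus() contract: the callback runs for every device below the given bus, and a non-zero return terminates the walk early. In outline (the predicate matches nhi_check_iommu_pdev() above; the wrapper name is illustrative):

static int has_protected_port(struct pci_dev *pdev, void *data)
{
	bool *found = data;

	if (pdev->external_facing &&
	    device_iommu_capable(&pdev->dev, IOMMU_CAP_PRE_BOOT_PROTECTION)) {
		*found = true;
		return 1;	/* non-zero stops the walk */
	}

	return 0;		/* keep walking */
}

static bool bus_has_protected_port(struct pci_bus *bus)
{
	bool found = false;

	pci_walk_bus(bus, has_protected_port, &found);
	return found;
}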
+ */ + tb = icm_probe(nhi); + if (!tb) + tb = tb_probe(nhi); + + return tb; +} + static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct device *dev = &pdev->dev; struct tb_nhi *nhi; struct tb *tb; int res; - if (!nhi_imr_valid(pdev)) { - dev_warn(&pdev->dev, "firmware image not valid, aborting\n"); - return -ENODEV; - } + if (!nhi_imr_valid(pdev)) + return dev_err_probe(dev, -ENODEV, "firmware image not valid, aborting\n"); res = pcim_enable_device(pdev); - if (res) { - dev_err(&pdev->dev, "cannot enable PCI device, aborting\n"); - return res; - } + if (res) + return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n"); res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt"); - if (res) { - dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n"); - return res; - } + if (res) + return dev_err_probe(dev, res, "cannot obtain PCI resources, aborting\n"); nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL); if (!nhi) @@ -1120,12 +1279,10 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) nhi->pdev = pdev; nhi->ops = (const struct tb_nhi_ops *)id->driver_data; - /* cannot fail - table is allocated bin pcim_iomap_regions */ + /* cannot fail - table is allocated in pcim_iomap_regions */ nhi->iobase = pcim_iomap_table(pdev)[0]; nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; - if (nhi->hop_count != 12 && nhi->hop_count != 32) - dev_warn(&pdev->dev, "unexpected hop count: %d\n", - nhi->hop_count); + dev_dbg(dev, "total paths: %d\n", nhi->hop_count); nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, sizeof(*nhi->tx_rings), GFP_KERNEL); @@ -1134,21 +1291,18 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (!nhi->tx_rings || !nhi->rx_rings) return -ENOMEM; + nhi_check_quirks(nhi); + nhi_check_iommu(nhi); + res = nhi_init_msi(nhi); - if (res) { - dev_err(&pdev->dev, "cannot enable MSI, aborting\n"); - return res; - } + if (res) + return dev_err_probe(dev, res, "cannot enable MSI, aborting\n"); spin_lock_init(&nhi->lock); res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (res) - res = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); - if (res) { - dev_err(&pdev->dev, "failed to set DMA mask\n"); - return res; - } + return dev_err_probe(dev, res, "failed to set DMA mask\n"); pci_set_master(pdev); @@ -1158,16 +1312,12 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) return res; } - tb = icm_probe(nhi); + tb = nhi_select_cm(nhi); if (!tb) - tb = tb_probe(nhi); - if (!tb) { - dev_err(&nhi->pdev->dev, + return dev_err_probe(dev, -ENODEV, "failed to determine connection manager, aborting\n"); - return -ENODEV; - } - dev_dbg(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n"); + dev_dbg(dev, "NHI initialized, starting thunderbolt\n"); res = tb_domain_add(tb); if (res) { @@ -1181,6 +1331,8 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) } pci_set_drvdata(pdev, tb); + device_wakeup_enable(&pdev->dev); + pm_runtime_allow(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(&pdev->dev); @@ -1210,14 +1362,13 @@ static void nhi_remove(struct pci_dev *pdev) static const struct dev_pm_ops nhi_pm_ops = { .suspend_noirq = nhi_suspend_noirq, .resume_noirq = nhi_resume_noirq, - .freeze_noirq = nhi_suspend_noirq, /* + .freeze_noirq = nhi_freeze_noirq, /* * we just disable hotplug, the * pci-tunnels stay alive. 
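The probe-path conversions in this hunk are all the same mechanical change: dev_err_probe() folds the error message and the return value into one statement, and as a bonus it stays quiet when the error is -EPROBE_DEFER. One of the hunks, before and after:

/* before */
res = pcim_enable_device(pdev);
if (res) {
	dev_err(&pdev->dev, "cannot enable PCI device, aborting\n");
	return res;
}

/* after, with dev = &pdev->dev hoisted to the top of probe */
res = pcim_enable_device(pdev);
if (res)
	return dev_err_probe(dev, res, "cannot enable PCI device, aborting\n");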
*/ - .thaw_noirq = nhi_resume_noirq, + .thaw_noirq = nhi_thaw_noirq, .restore_noirq = nhi_resume_noirq, .suspend = nhi_suspend, - .freeze = nhi_suspend, .poweroff_noirq = nhi_poweroff_noirq, .poweroff = nhi_suspend, .complete = nhi_complete, @@ -1270,6 +1421,29 @@ static struct pci_device_id nhi_ids[] = { .driver_data = (kernel_ulong_t)&icl_nhi_ops }, { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1), .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + /* Thunderbolt 4 */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL_H_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_RPL_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_M_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI0), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MTL_P_NHI1), + .driver_data = (kernel_ulong_t)&icl_nhi_ops }, /* Any USB4 compliant host */ { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) }, @@ -1285,6 +1459,7 @@ static struct pci_driver nhi_driver = { .id_table = nhi_ids, .probe = nhi_probe, .remove = nhi_remove, + .shutdown = nhi_remove, .driver.pm = &nhi_pm_ops, }; diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h index 5d276ee9b38e..b0718020c6f5 100644 --- a/drivers/thunderbolt/nhi.h +++ b/drivers/thunderbolt/nhi.h @@ -55,6 +55,8 @@ extern const struct tb_nhi_ops icl_nhi_ops; * need for the PCI quirk anymore as we will use ICM also on Apple * hardware. 
*/ +#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_2C_NHI 0x1134 +#define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_4C_NHI 0x1137 #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_NHI 0x157d #define PCI_DEVICE_ID_INTEL_WIN_RIDGE_2C_BRIDGE 0x157e #define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_NHI 0x15bf @@ -71,8 +73,19 @@ extern const struct tb_nhi_ops icl_nhi_ops; #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE 0x15ea #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_NHI 0x15eb #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE 0x15ef +#define PCI_DEVICE_ID_INTEL_ADL_NHI0 0x463e +#define PCI_DEVICE_ID_INTEL_ADL_NHI1 0x466d +#define PCI_DEVICE_ID_INTEL_MTL_M_NHI0 0x7eb2 +#define PCI_DEVICE_ID_INTEL_MTL_P_NHI0 0x7ec2 +#define PCI_DEVICE_ID_INTEL_MTL_P_NHI1 0x7ec3 #define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d #define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17 +#define PCI_DEVICE_ID_INTEL_TGL_NHI0 0x9a1b +#define PCI_DEVICE_ID_INTEL_TGL_NHI1 0x9a1d +#define PCI_DEVICE_ID_INTEL_TGL_H_NHI0 0x9a1f +#define PCI_DEVICE_ID_INTEL_TGL_H_NHI1 0x9a21 +#define PCI_DEVICE_ID_INTEL_RPL_NHI0 0xa73e +#define PCI_DEVICE_ID_INTEL_RPL_NHI1 0xa76d #define PCI_CLASS_SERIAL_USB_USB4 0x0c0340 diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c index 6795851aac95..96da07e88c52 100644 --- a/drivers/thunderbolt/nhi_ops.c +++ b/drivers/thunderbolt/nhi_ops.c @@ -59,7 +59,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power) pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap); if (power) { - unsigned int retries = 10; + unsigned int retries = 350; u32 val; /* Wait until the firmware tells it is up and running */ @@ -67,7 +67,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power) pci_read_config_dword(nhi->pdev, VS_CAP_9, &val); if (val & VS_CAP_9_FW_READY) return 0; - msleep(250); + usleep_range(3000, 3100); } while (--retries); return -ETIMEDOUT; @@ -97,7 +97,7 @@ static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout) pci_read_config_dword(nhi->pdev, VS_CAP_18, &data); if (data & VS_CAP_18_DONE) goto clear; - msleep(100); + usleep_range(1000, 1100); } while (time_before(jiffies, end)); return -ETIMEDOUT; @@ -121,31 +121,38 @@ static void icl_nhi_set_ltr(struct tb_nhi *nhi) static int icl_nhi_suspend(struct tb_nhi *nhi) { + struct tb *tb = pci_get_drvdata(nhi->pdev); int ret; if (icl_nhi_is_device_connected(nhi)) return 0; - /* - * If there is no device connected we need to perform both: a - * handshake through LC mailbox and force power down before - * entering D3. - */ - icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET); - ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); - if (ret) - return ret; + if (tb_switch_is_icm(tb->root_switch)) { + /* + * If there is no device connected we need to perform + * both: a handshake through LC mailbox and force power + * down before entering D3. + */ + icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET); + ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); + if (ret) + return ret; + } return icl_nhi_force_power(nhi, false); } static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup) { + struct tb *tb = pci_get_drvdata(nhi->pdev); enum icl_lc_mailbox_cmd cmd; if (!pm_suspend_via_firmware()) return icl_nhi_suspend(nhi); + if (!tb_switch_is_icm(tb->root_switch)) + return 0; + cmd = wakeup ? 
ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE; icl_nhi_lc_mailbox_cmd(nhi, cmd); return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); diff --git a/drivers/thunderbolt/nvm.c b/drivers/thunderbolt/nvm.c new file mode 100644 index 000000000000..3dd5f81bd629 --- /dev/null +++ b/drivers/thunderbolt/nvm.c @@ -0,0 +1,630 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVM helpers + * + * Copyright (C) 2020, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/idr.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include "tb.h" + +/* Intel specific NVM offsets */ +#define INTEL_NVM_DEVID 0x05 +#define INTEL_NVM_VERSION 0x08 +#define INTEL_NVM_CSS 0x10 +#define INTEL_NVM_FLASH_SIZE 0x45 + +/* ASMedia specific NVM offsets */ +#define ASMEDIA_NVM_DATE 0x1c +#define ASMEDIA_NVM_VERSION 0x28 + +static DEFINE_IDA(nvm_ida); + +/** + * struct tb_nvm_vendor_ops - Vendor specific NVM operations + * @read_version: Reads out NVM version from the flash + * @validate: Validates the NVM image before update (optional) + * @write_headers: Writes headers before the rest of the image (optional) + */ +struct tb_nvm_vendor_ops { + int (*read_version)(struct tb_nvm *nvm); + int (*validate)(struct tb_nvm *nvm); + int (*write_headers)(struct tb_nvm *nvm); +}; + +/** + * struct tb_nvm_vendor - Vendor to &struct tb_nvm_vendor_ops mapping + * @vendor: Vendor ID + * @vops: Vendor specific NVM operations + * + * Maps vendor ID to NVM vendor operations. If there is no mapping then + * NVM firmware upgrade is disabled for the device. + */ +struct tb_nvm_vendor { + u16 vendor; + const struct tb_nvm_vendor_ops *vops; +}; + +static int intel_switch_nvm_version(struct tb_nvm *nvm) +{ + struct tb_switch *sw = tb_to_switch(nvm->dev); + u32 val, nvm_size, hdr_size; + int ret; + + /* + * If the switch is in safe-mode the only accessible portion of + * the NVM is the non-active one where userspace is expected to + * write new functional NVM. + */ + if (sw->safe_mode) + return 0; + + ret = tb_switch_nvm_read(sw, INTEL_NVM_FLASH_SIZE, &val, sizeof(val)); + if (ret) + return ret; + + hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K; + nvm_size = (SZ_1M << (val & 7)) / 8; + nvm_size = (nvm_size - hdr_size) / 2; + + ret = tb_switch_nvm_read(sw, INTEL_NVM_VERSION, &val, sizeof(val)); + if (ret) + return ret; + + nvm->major = (val >> 16) & 0xff; + nvm->minor = (val >> 8) & 0xff; + nvm->active_size = nvm_size; + + return 0; +} + +static int intel_switch_nvm_validate(struct tb_nvm *nvm) +{ + struct tb_switch *sw = tb_to_switch(nvm->dev); + unsigned int image_size, hdr_size; + u16 ds_size, device_id; + u8 *buf = nvm->buf; + + image_size = nvm->buf_data_size; + + /* + * FARB pointer must point inside the image and must at least + * contain parts of the digital section we will be reading here. + */ + hdr_size = (*(u32 *)buf) & 0xffffff; + if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size) + return -EINVAL; + + /* Digital section start should be aligned to 4k page */ + if (!IS_ALIGNED(hdr_size, SZ_4K)) + return -EINVAL; + + /* + * Read digital section size and check that it also fits inside + * the image. + */ + ds_size = *(u16 *)(buf + hdr_size); + if (ds_size >= image_size) + return -EINVAL; + + if (sw->safe_mode) + return 0; + + /* + * Make sure the device ID in the image matches the one + * we read from the switch config space. 
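The size arithmetic in intel_switch_nvm_version() packs a lot into two lines, so a worked example helps. Assuming the FLASH_SIZE register reads back 2 on a generation 3 or later router (a made-up value):

/*
 * nvm_size = (SZ_1M << (2 & 7)) / 8 = 4M / 8 = 512 KiB of flash
 * active   = (512K - SZ_16K) / 2    = 248 KiB per image
 *
 * After the 16 KiB header the flash holds two images of 248 KiB
 * each: the active NVM and the non-active one written on upgrade.
 */
static u32 intel_nvm_active_size(u32 flash_size_reg, u32 hdr_size)
{
	u32 nvm_size = (SZ_1M << (flash_size_reg & 7)) / 8;

	return (nvm_size - hdr_size) / 2;
}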
+ */ + device_id = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID); + if (device_id != sw->config.device_id) + return -EINVAL; + + /* Skip headers in the image */ + nvm->buf_data_start = buf + hdr_size; + nvm->buf_data_size = image_size - hdr_size; + + return 0; +} + +static int intel_switch_nvm_write_headers(struct tb_nvm *nvm) +{ + struct tb_switch *sw = tb_to_switch(nvm->dev); + + if (sw->generation < 3) { + int ret; + + /* Write CSS headers first */ + ret = dma_port_flash_write(sw->dma_port, + DMA_PORT_CSS_ADDRESS, nvm->buf + INTEL_NVM_CSS, + DMA_PORT_CSS_MAX_SIZE); + if (ret) + return ret; + } + + return 0; +} + +static const struct tb_nvm_vendor_ops intel_switch_nvm_ops = { + .read_version = intel_switch_nvm_version, + .validate = intel_switch_nvm_validate, + .write_headers = intel_switch_nvm_write_headers, +}; + +static int asmedia_switch_nvm_version(struct tb_nvm *nvm) +{ + struct tb_switch *sw = tb_to_switch(nvm->dev); + u32 val; + int ret; + + ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_VERSION, &val, sizeof(val)); + if (ret) + return ret; + + nvm->major = (val << 16) & 0xff0000; + nvm->major |= val & 0x00ff00; + nvm->major |= (val >> 16) & 0x0000ff; + + ret = tb_switch_nvm_read(sw, ASMEDIA_NVM_DATE, &val, sizeof(val)); + if (ret) + return ret; + + nvm->minor = (val << 16) & 0xff0000; + nvm->minor |= val & 0x00ff00; + nvm->minor |= (val >> 16) & 0x0000ff; + + /* ASMedia NVM size is fixed to 512k */ + nvm->active_size = SZ_512K; + + return 0; +} + +static const struct tb_nvm_vendor_ops asmedia_switch_nvm_ops = { + .read_version = asmedia_switch_nvm_version, +}; + +/* Router vendor NVM support table */ +static const struct tb_nvm_vendor switch_nvm_vendors[] = { + { 0x174c, &asmedia_switch_nvm_ops }, + { PCI_VENDOR_ID_INTEL, &intel_switch_nvm_ops }, + { 0x8087, &intel_switch_nvm_ops }, +}; + +static int intel_retimer_nvm_version(struct tb_nvm *nvm) +{ + struct tb_retimer *rt = tb_to_retimer(nvm->dev); + u32 val, nvm_size; + int ret; + + ret = tb_retimer_nvm_read(rt, INTEL_NVM_VERSION, &val, sizeof(val)); + if (ret) + return ret; + + nvm->major = (val >> 16) & 0xff; + nvm->minor = (val >> 8) & 0xff; + + ret = tb_retimer_nvm_read(rt, INTEL_NVM_FLASH_SIZE, &val, sizeof(val)); + if (ret) + return ret; + + nvm_size = (SZ_1M << (val & 7)) / 8; + nvm_size = (nvm_size - SZ_16K) / 2; + nvm->active_size = nvm_size; + + return 0; +} + +static int intel_retimer_nvm_validate(struct tb_nvm *nvm) +{ + struct tb_retimer *rt = tb_to_retimer(nvm->dev); + unsigned int image_size, hdr_size; + u8 *buf = nvm->buf; + u16 ds_size, device; + + image_size = nvm->buf_data_size; + + /* + * FARB pointer must point inside the image and must at least + * contain parts of the digital section we will be reading here. + */ + hdr_size = (*(u32 *)buf) & 0xffffff; + if (hdr_size + INTEL_NVM_DEVID + 2 >= image_size) + return -EINVAL; + + /* Digital section start should be aligned to 4k page */ + if (!IS_ALIGNED(hdr_size, SZ_4K)) + return -EINVAL; + + /* + * Read digital section size and check that it also fits inside + * the image. + */ + ds_size = *(u16 *)(buf + hdr_size); + if (ds_size >= image_size) + return -EINVAL; + + /* + * Make sure the device ID in the image matches the retimer + * hardware. 
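The shifting in asmedia_switch_nvm_version() is a 24-bit byte swap: the version and date registers come back with the low and high bytes exchanged. As a standalone helper with a worked example:

/* e.g. a register value of 0x00123456 yields version 0x563412 */
static u32 asmedia_swap24(u32 val)
{
	return ((val << 16) & 0xff0000) |
	       (val & 0x00ff00) |
	       ((val >> 16) & 0x0000ff);
}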
+ */ + device = *(u16 *)(buf + hdr_size + INTEL_NVM_DEVID); + if (device != rt->device) + return -EINVAL; + + /* Skip headers in the image */ + nvm->buf_data_start = buf + hdr_size; + nvm->buf_data_size = image_size - hdr_size; + + return 0; +} + +static const struct tb_nvm_vendor_ops intel_retimer_nvm_ops = { + .read_version = intel_retimer_nvm_version, + .validate = intel_retimer_nvm_validate, +}; + +/* Retimer vendor NVM support table */ +static const struct tb_nvm_vendor retimer_nvm_vendors[] = { + { 0x8087, &intel_retimer_nvm_ops }, +}; + +/** + * tb_nvm_alloc() - Allocate new NVM structure + * @dev: Device owning the NVM + * + * Allocates new NVM structure with unique @id and returns it. In case + * of error returns ERR_PTR(). Specifically returns %-EOPNOTSUPP if the + * NVM format of the @dev is not known by the kernel. + */ +struct tb_nvm *tb_nvm_alloc(struct device *dev) +{ + const struct tb_nvm_vendor_ops *vops = NULL; + struct tb_nvm *nvm; + int ret, i; + + if (tb_is_switch(dev)) { + const struct tb_switch *sw = tb_to_switch(dev); + + for (i = 0; i < ARRAY_SIZE(switch_nvm_vendors); i++) { + const struct tb_nvm_vendor *v = &switch_nvm_vendors[i]; + + if (v->vendor == sw->config.vendor_id) { + vops = v->vops; + break; + } + } + + if (!vops) { + tb_sw_dbg(sw, "router NVM format of vendor %#x unknown\n", + sw->config.vendor_id); + return ERR_PTR(-EOPNOTSUPP); + } + } else if (tb_is_retimer(dev)) { + const struct tb_retimer *rt = tb_to_retimer(dev); + + for (i = 0; i < ARRAY_SIZE(retimer_nvm_vendors); i++) { + const struct tb_nvm_vendor *v = &retimer_nvm_vendors[i]; + + if (v->vendor == rt->vendor) { + vops = v->vops; + break; + } + } + + if (!vops) { + dev_dbg(dev, "retimer NVM format of vendor %#x unknown\n", + rt->vendor); + return ERR_PTR(-EOPNOTSUPP); + } + } else { + return ERR_PTR(-EOPNOTSUPP); + } + + nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); + if (!nvm) + return ERR_PTR(-ENOMEM); + + ret = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL); + if (ret < 0) { + kfree(nvm); + return ERR_PTR(ret); + } + + nvm->id = ret; + nvm->dev = dev; + nvm->vops = vops; + + return nvm; +} + +/** + * tb_nvm_read_version() - Read and populate NVM version + * @nvm: NVM structure + * + * Uses vendor specific means to read out and fill in the existing + * active NVM version. Returns %0 in case of success and negative errno + * otherwise. + */ +int tb_nvm_read_version(struct tb_nvm *nvm) +{ + const struct tb_nvm_vendor_ops *vops = nvm->vops; + + if (vops && vops->read_version) + return vops->read_version(nvm); + + return -EOPNOTSUPP; +} + +/** + * tb_nvm_validate() - Validate new NVM image + * @nvm: NVM structure + * + * Runs vendor specific validation over the new NVM image and if all + * checks pass returns %0. As side effect updates @nvm->buf_data_start + * and @nvm->buf_data_size fields to match the actual data to be written + * to the NVM. + * + * If the validation does not pass then returns negative errno. + */ +int tb_nvm_validate(struct tb_nvm *nvm) +{ + const struct tb_nvm_vendor_ops *vops = nvm->vops; + unsigned int image_size; + u8 *buf = nvm->buf; + + if (!buf) + return -EINVAL; + if (!vops) + return -EOPNOTSUPP; + + /* Just do basic image size checks */ + image_size = nvm->buf_data_size; + if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE) + return -EINVAL; + + /* + * Set the default data start in the buffer. The validate method + * below can change this if needed. + */ + nvm->buf_data_start = buf; + + return vops->validate ? 
vops->validate(nvm) : 0;
+}
+
+/**
+ * tb_nvm_write_headers() - Write headers before the rest of the image
+ * @nvm: NVM structure
+ *
+ * If the vendor NVM format requires writing headers before the rest of
+ * the image, this function does that. Can be called even if the device
+ * does not need this.
+ *
+ * Returns %0 in case of success and negative errno otherwise.
+ */
+int tb_nvm_write_headers(struct tb_nvm *nvm)
+{
+	const struct tb_nvm_vendor_ops *vops = nvm->vops;
+
+	return vops->write_headers ? vops->write_headers(nvm) : 0;
+}
+
+/**
+ * tb_nvm_add_active() - Adds active NVMem device to NVM
+ * @nvm: NVM structure
+ * @reg_read: Pointer to the function to read the NVM (passed directly to the
+ *	      NVMem device)
+ *
+ * Registers new active NVMem device for @nvm. The @reg_read is called
+ * directly from NVMem so it must handle possible concurrent access if
+ * needed. The first parameter passed to @reg_read is the @nvm structure.
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read)
+{
+	struct nvmem_config config;
+	struct nvmem_device *nvmem;
+
+	memset(&config, 0, sizeof(config));
+
+	config.name = "nvm_active";
+	config.reg_read = reg_read;
+	config.read_only = true;
+	config.id = nvm->id;
+	config.stride = 4;
+	config.word_size = 4;
+	config.size = nvm->active_size;
+	config.dev = nvm->dev;
+	config.owner = THIS_MODULE;
+	config.priv = nvm;
+
+	nvmem = nvmem_register(&config);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	nvm->active = nvmem;
+	return 0;
+}
+
+/**
+ * tb_nvm_write_buf() - Write data to @nvm buffer
+ * @nvm: NVM structure
+ * @offset: Offset where to write the data
+ * @val: Data buffer to write
+ * @bytes: Number of bytes to write
+ *
+ * Helper function to cache the new NVM image before it is actually
+ * written to the flash. Copies @bytes from @val to @nvm->buf starting
+ * from @offset.
+ */
+int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val,
+		     size_t bytes)
+{
+	if (!nvm->buf) {
+		nvm->buf = vmalloc(NVM_MAX_SIZE);
+		if (!nvm->buf)
+			return -ENOMEM;
+	}
+
+	nvm->flushed = false;
+	nvm->buf_data_size = offset + bytes;
+	memcpy(nvm->buf + offset, val, bytes);
+	return 0;
+}
+
+/**
+ * tb_nvm_add_non_active() - Adds non-active NVMem device to NVM
+ * @nvm: NVM structure
+ * @reg_write: Pointer to the function to write the NVM (passed directly
+ *	       to the NVMem device)
+ *
+ * Registers new non-active NVMem device for @nvm. The @reg_write is called
+ * directly from NVMem so it must handle possible concurrent access if
+ * needed. The first parameter passed to @reg_write is the @nvm structure.
+ * The size of the NVMem device is set to %NVM_MAX_SIZE.
+ *
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write)
+{
+	struct nvmem_config config;
+	struct nvmem_device *nvmem;
+
+	memset(&config, 0, sizeof(config));
+
+	config.name = "nvm_non_active";
+	config.reg_write = reg_write;
+	config.root_only = true;
+	config.id = nvm->id;
+	config.stride = 4;
+	config.word_size = 4;
+	config.size = NVM_MAX_SIZE;
+	config.dev = nvm->dev;
+	config.owner = THIS_MODULE;
+	config.priv = nvm;
+
+	nvmem = nvmem_register(&config);
+	if (IS_ERR(nvmem))
+		return PTR_ERR(nvmem);
+
+	nvm->non_active = nvmem;
+	return 0;
+}
+
+/**
+ * tb_nvm_free() - Release NVM and its resources
+ * @nvm: NVM structure to release
+ *
+ * Releases NVM and the NVMem devices if they were registered.
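+ *
+ * A minimal caller sketch (assuming the owner keeps the pointer in
+ * @sw->nvm, as the switch code in this series does):
+ *
+ *	tb_nvm_free(sw->nvm);
+ *	sw->nvm = NULL;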
+ */
+void tb_nvm_free(struct tb_nvm *nvm)
+{
+	if (nvm) {
+		nvmem_unregister(nvm->non_active);
+		nvmem_unregister(nvm->active);
+		vfree(nvm->buf);
+		ida_simple_remove(&nvm_ida, nvm->id);
+	}
+	kfree(nvm);
+}
+
+/**
+ * tb_nvm_read_data() - Read data from NVM
+ * @address: Start address on the flash
+ * @buf: Buffer where the read data is copied
+ * @size: Size of the buffer in bytes
+ * @retries: Number of retries if block read fails
+ * @read_block: Function that reads block from the flash
+ * @read_block_data: Data passed to @read_block
+ *
+ * This is a generic function that reads data from NVM or NVM-like
+ * device.
+ *
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_read_data(unsigned int address, void *buf, size_t size,
+		     unsigned int retries, read_block_fn read_block,
+		     void *read_block_data)
+{
+	do {
+		unsigned int dwaddress, dwords, offset;
+		u8 data[NVM_DATA_DWORDS * 4];
+		size_t nbytes;
+		int ret;
+
+		offset = address & 3;
+		nbytes = min_t(size_t, size + offset, NVM_DATA_DWORDS * 4);
+
+		dwaddress = address / 4;
+		dwords = ALIGN(nbytes, 4) / 4;
+
+		ret = read_block(read_block_data, dwaddress, data, dwords);
+		if (ret) {
+			if (ret != -ENODEV && retries--)
+				continue;
+			return ret;
+		}
+
+		nbytes -= offset;
+		memcpy(buf, data + offset, nbytes);
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
+
+/**
+ * tb_nvm_write_data() - Write data to NVM
+ * @address: Start address on the flash
+ * @buf: Buffer where the data is copied from
+ * @size: Size of the buffer in bytes
+ * @retries: Number of retries if the block write fails
+ * @write_block: Function that writes block to the flash
+ * @write_block_data: Data passed to @write_block
+ *
+ * This is a generic function that writes data to NVM or NVM-like device.
+ *
+ * Returns %0 on success and negative errno otherwise.
+ */
+int tb_nvm_write_data(unsigned int address, const void *buf, size_t size,
+		      unsigned int retries, write_block_fn write_block,
+		      void *write_block_data)
+{
+	do {
+		unsigned int offset, dwaddress;
+		u8 data[NVM_DATA_DWORDS * 4];
+		size_t nbytes;
+		int ret;
+
+		offset = address & 3;
+		nbytes = min_t(u32, size + offset, NVM_DATA_DWORDS * 4);
+
+		memcpy(data + offset, buf, nbytes);
+
+		dwaddress = address / 4;
+		ret = write_block(write_block_data, dwaddress, data, nbytes / 4);
+		if (ret) {
+			if (ret == -ETIMEDOUT) {
+				if (retries--)
+					continue;
+				ret = -EIO;
+			}
+			return ret;
+		}
+
+		size -= nbytes;
+		address += nbytes;
+		buf += nbytes;
+	} while (size > 0);
+
+	return 0;
+}
+
+void tb_nvm_exit(void)
+{
+	ida_destroy(&nvm_ida);
+}
diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c
index ad58559ea88e..ee03fd75a472 100644
--- a/drivers/thunderbolt/path.c
+++ b/drivers/thunderbolt/path.c
@@ -85,11 +85,12 @@ static int tb_path_find_src_hopid(struct tb_port *src,
  * @dst_hopid: HopID to the @dst (%-1 if don't care)
  * @last: Last port is filled here if not %NULL
  * @name: Name of the path
+ * @alloc_hopid: Allocate HopIDs for the ports
  *
  * Follows a path starting from @src and @src_hopid to the last output
- * port of the path. Allocates HopIDs for the visited ports. Call
- * tb_path_free() to release the path and allocated HopIDs when the path
- * is not needed anymore.
+ * port of the path. Allocates HopIDs for the visited ports (if
+ * @alloc_hopid is true). Call tb_path_free() to release the path and
+ * allocated HopIDs when the path is not needed anymore.
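+ *
+ * A discovery sketch (hypothetical caller; the HopID and name depend
+ * on the tunnel type being discovered):
+ *
+ *	struct tb_port *last;
+ *	struct tb_path *path;
+ *
+ *	path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1, &last,
+ *				"PCIe Down", false);
+ *	if (!path || last != up)
+ *		return -ENODEV;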
* * Note function discovers also incomplete paths so caller should check * that the @dst port is the expected one. If it is not, the path can be @@ -99,7 +100,8 @@ static int tb_path_find_src_hopid(struct tb_port *src, */ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, struct tb_port *dst, int dst_hopid, - struct tb_port **last, const char *name) + struct tb_port **last, const char *name, + bool alloc_hopid) { struct tb_port *out_port; struct tb_regs_hop hop; @@ -156,6 +158,7 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, path->tb = src->sw->tb; path->path_length = num_hops; path->activated = true; + path->alloc_hopid = alloc_hopid; path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL); if (!path->hops) { @@ -163,6 +166,9 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, return NULL; } + tb_dbg(path->tb, "discovering %s path starting from %llx:%u\n", + path->name, tb_route(src->sw), src->port); + p = src; h = src_hopid; @@ -177,13 +183,14 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, goto err; } - if (tb_port_alloc_in_hopid(p, h, h) < 0) + if (alloc_hopid && tb_port_alloc_in_hopid(p, h, h) < 0) goto err; out_port = &sw->ports[hop.out_port]; next_hop = hop.next_hop; - if (tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) { + if (alloc_hopid && + tb_port_alloc_out_hopid(out_port, next_hop, next_hop) < 0) { tb_port_release_in_hopid(p, h); goto err; } @@ -194,10 +201,13 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, path->hops[i].out_port = out_port; path->hops[i].next_hop_index = next_hop; + tb_dump_hop(&path->hops[i], &hop); + h = next_hop; p = out_port->remote; } + tb_dbg(path->tb, "path discovery complete\n"); return path; err: @@ -229,7 +239,7 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, struct tb_port *dst, int dst_hopid, int link_nr, const char *name) { - struct tb_port *in_port, *out_port; + struct tb_port *in_port, *out_port, *first_port, *last_port; int in_hopid, out_hopid; struct tb_path *path; size_t num_hops; @@ -239,12 +249,23 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, if (!path) return NULL; - /* - * Number of hops on a path is the distance between the two - * switches plus the source adapter port. 
- */ - num_hops = abs(tb_route_length(tb_route(src->sw)) - - tb_route_length(tb_route(dst->sw))) + 1; + first_port = last_port = NULL; + i = 0; + tb_for_each_port_on_path(src, dst, in_port) { + if (!first_port) + first_port = in_port; + last_port = in_port; + i++; + } + + /* Check that src and dst are reachable */ + if (first_port != src || last_port != dst) { + kfree(path); + return NULL; + } + + /* Each hop takes two ports */ + num_hops = i / 2; path->hops = kcalloc(num_hops, sizeof(*path->hops), GFP_KERNEL); if (!path->hops) { @@ -252,6 +273,8 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, return NULL; } + path->alloc_hopid = true; + in_hopid = src_hopid; out_port = NULL; @@ -334,17 +357,19 @@ err: */ void tb_path_free(struct tb_path *path) { - int i; - - for (i = 0; i < path->path_length; i++) { - const struct tb_path_hop *hop = &path->hops[i]; - - if (hop->in_port) - tb_port_release_in_hopid(hop->in_port, - hop->in_hop_index); - if (hop->out_port) - tb_port_release_out_hopid(hop->out_port, - hop->next_hop_index); + if (path->alloc_hopid) { + int i; + + for (i = 0; i < path->path_length; i++) { + const struct tb_path_hop *hop = &path->hops[i]; + + if (hop->in_port) + tb_port_release_in_hopid(hop->in_port, + hop->in_hop_index); + if (hop->out_port) + tb_port_release_out_hopid(hop->out_port, + hop->next_hop_index); + } } kfree(path->hops); @@ -356,7 +381,7 @@ static void __tb_path_deallocate_nfc(struct tb_path *path, int first_hop) int i, res; for (i = first_hop; i < path->path_length; i++) { res = tb_port_add_nfc_credits(path->hops[i].in_port, - -path->nfc_credits); + -path->hops[i].nfc_credits); if (res) tb_port_warn(path->hops[i].in_port, "nfc credits deallocation failed for hop %d\n", @@ -395,10 +420,17 @@ static int __tb_path_deactivate_hop(struct tb_port *port, int hop_index, if (!hop.pending) { if (clear_fc) { - /* Clear flow control */ - hop.ingress_fc = 0; + /* + * Clear flow control. Protocol adapters + * IFC and ISE bits are vendor defined + * in the USB4 spec so we clear them + * only for pre-USB4 adapters. + */ + if (!tb_switch_is_usb4(port->sw)) { + hop.ingress_fc = 0; + hop.ingress_shared_buffer = 0; + } hop.egress_fc = 0; - hop.ingress_shared_buffer = 0; hop.egress_shared_buffer = 0; return tb_port_write(port, &hop, TB_CFG_HOPS, @@ -436,7 +468,7 @@ void tb_path_deactivate(struct tb_path *path) return; } tb_dbg(path->tb, - "deactivating %s path from %llx:%x to %llx:%x\n", + "deactivating %s path from %llx:%u to %llx:%u\n", path->name, tb_route(path->hops[0].in_port->sw), path->hops[0].in_port->port, tb_route(path->hops[path->path_length - 1].out_port->sw), @@ -448,6 +480,7 @@ void tb_path_deactivate(struct tb_path *path) /** * tb_path_activate() - activate a path + * @path: Path to activate * * Activate a path starting with the last hop and iterating backwards. The * caller must fill path->hops before calling tb_path_activate(). @@ -464,7 +497,7 @@ int tb_path_activate(struct tb_path *path) } tb_dbg(path->tb, - "activating %s path from %llx:%x to %llx:%x\n", + "activating %s path from %llx:%u to %llx:%u\n", path->name, tb_route(path->hops[0].in_port->sw), path->hops[0].in_port->port, tb_route(path->hops[path->path_length - 1].out_port->sw), @@ -483,7 +516,7 @@ int tb_path_activate(struct tb_path *path) /* Add non flow controlled credits. 
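 * Each hop now carries its own count (hops[i].nfc_credits); the
 * amounts added here match what __tb_path_deallocate_nfc() removes
 * on the error path.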
 */
 	for (i = path->path_length - 1; i >= 0; i--) {
 		res = tb_port_add_nfc_credits(path->hops[i].in_port,
-					      path->nfc_credits);
+					      path->hops[i].nfc_credits);
 		if (res) {
 			__tb_path_deallocate_nfc(path, i);
 			goto err;
 		}
@@ -543,6 +576,7 @@ err:
 
 /**
  * tb_path_is_invalid() - check whether any ports on the path are invalid
+ * @path: Path to check
  *
  * Return: Returns true if the path is invalid, false otherwise.
  */
@@ -559,21 +593,20 @@ bool tb_path_is_invalid(struct tb_path *path)
 }
 
 /**
- * tb_path_switch_on_path() - Does the path go through certain switch
+ * tb_path_port_on_path() - Does the path go through a certain port
  * @path: Path to check
- * @sw: Switch to check
+ * @port: Port to check
  *
- * Goes over all hops on path and checks if @sw is any of them.
+ * Goes over all hops on the path and checks if @port is any of them.
  * Direction does not matter.
  */
-bool tb_path_switch_on_path(const struct tb_path *path,
-			    const struct tb_switch *sw)
+bool tb_path_port_on_path(const struct tb_path *path, const struct tb_port *port)
 {
 	int i;
 
 	for (i = 0; i < path->path_length; i++) {
-		if (path->hops[i].in_port->sw == sw ||
-		    path->hops[i].out_port->sw == sw)
+		if (path->hops[i].in_port == port ||
+		    path->hops[i].out_port == port)
 			return true;
 	}
 
diff --git a/drivers/thunderbolt/property.c b/drivers/thunderbolt/property.c
index d5b0cdb8f0b1..dc555cda98e6 100644
--- a/drivers/thunderbolt/property.c
+++ b/drivers/thunderbolt/property.c
@@ -502,6 +502,77 @@ ssize_t tb_property_format_dir(const struct tb_property_dir *dir, u32 *block,
 }
 
 /**
+ * tb_property_copy_dir() - Take a deep copy of directory
+ * @dir: Directory to copy
+ *
+ * This function takes a deep copy of @dir and returns the copy. In
+ * case of error returns %NULL. The resulting directory needs to be
+ * released by calling tb_property_free_dir().
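+ *
+ * Usage sketch (hypothetical; @src stands for any existing directory,
+ * for example one parsed from a remote XDomain property block):
+ *
+ *	struct tb_property_dir *copy;
+ *
+ *	copy = tb_property_copy_dir(src);
+ *	if (!copy)
+ *		return -ENOMEM;
+ *	...
+ *	tb_property_free_dir(copy);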
+ */ +struct tb_property_dir *tb_property_copy_dir(const struct tb_property_dir *dir) +{ + struct tb_property *property, *p = NULL; + struct tb_property_dir *d; + + if (!dir) + return NULL; + + d = tb_property_create_dir(dir->uuid); + if (!d) + return NULL; + + list_for_each_entry(property, &dir->properties, list) { + struct tb_property *p; + + p = tb_property_alloc(property->key, property->type); + if (!p) + goto err_free; + + p->length = property->length; + + switch (property->type) { + case TB_PROPERTY_TYPE_DIRECTORY: + p->value.dir = tb_property_copy_dir(property->value.dir); + if (!p->value.dir) + goto err_free; + break; + + case TB_PROPERTY_TYPE_DATA: + p->value.data = kmemdup(property->value.data, + property->length * 4, + GFP_KERNEL); + if (!p->value.data) + goto err_free; + break; + + case TB_PROPERTY_TYPE_TEXT: + p->value.text = kzalloc(p->length * 4, GFP_KERNEL); + if (!p->value.text) + goto err_free; + strcpy(p->value.text, property->value.text); + break; + + case TB_PROPERTY_TYPE_VALUE: + p->value.immediate = property->value.immediate; + break; + + default: + break; + } + + list_add_tail(&p->list, &d->properties); + } + + return d; + +err_free: + kfree(p); + tb_property_free_dir(d); + + return NULL; +} + +/** * tb_property_add_immediate() - Add immediate property to directory * @parent: Directory to add the property * @key: Key for the property diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c new file mode 100644 index 000000000000..b5f2ec79c4d6 --- /dev/null +++ b/drivers/thunderbolt/quirks.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt driver - quirks + * + * Copyright (c) 2020 Mario Limonciello <mario.limonciello@dell.com> + */ + +#include "tb.h" + +static void quirk_force_power_link(struct tb_switch *sw) +{ + sw->quirks |= QUIRK_FORCE_POWER_LINK_CONTROLLER; +} + +static void quirk_dp_credit_allocation(struct tb_switch *sw) +{ + if (sw->credit_allocation && sw->min_dp_main_credits == 56) { + sw->min_dp_main_credits = 18; + tb_sw_dbg(sw, "quirked DP main: %u\n", sw->min_dp_main_credits); + } +} + +struct tb_quirk { + u16 hw_vendor_id; + u16 hw_device_id; + u16 vendor; + u16 device; + void (*hook)(struct tb_switch *sw); +}; + +static const struct tb_quirk tb_quirks[] = { + /* Dell WD19TB supports self-authentication on unplug */ + { 0x0000, 0x0000, 0x00d4, 0xb070, quirk_force_power_link }, + { 0x0000, 0x0000, 0x00d4, 0xb071, quirk_force_power_link }, + /* + * Intel Goshen Ridge NVM 27 and before report wrong number of + * DP buffers. + */ + { 0x8087, 0x0b26, 0x0000, 0x0000, quirk_dp_credit_allocation }, +}; + +/** + * tb_check_quirks() - Check for quirks to apply + * @sw: Thunderbolt switch + * + * Apply any quirks for the Thunderbolt controller. + */ +void tb_check_quirks(struct tb_switch *sw) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(tb_quirks); i++) { + const struct tb_quirk *q = &tb_quirks[i]; + + if (q->hw_vendor_id && q->hw_vendor_id != sw->config.vendor_id) + continue; + if (q->hw_device_id && q->hw_device_id != sw->config.device_id) + continue; + if (q->vendor && q->vendor != sw->vendor) + continue; + if (q->device && q->device != sw->device) + continue; + + q->hook(sw); + } +} diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c new file mode 100644 index 000000000000..81252e31014a --- /dev/null +++ b/drivers/thunderbolt/retimer.c @@ -0,0 +1,524 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt/USB4 retimer support. 
+ * + * Copyright (C) 2020, Intel Corporation + * Authors: Kranthi Kuntala <kranthi.kuntala@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <linux/sched/signal.h> + +#include "sb_regs.h" +#include "tb.h" + +#define TB_MAX_RETIMER_INDEX 6 + +/** + * tb_retimer_nvm_read() - Read contents of retimer NVM + * @rt: Retimer device + * @address: NVM address (in bytes) to start reading + * @buf: Data read from NVM is stored here + * @size: Number of bytes to read + * + * Reads retimer NVM and copies the contents to @buf. Returns %0 if the + * read was successful and negative errno in case of failure. + */ +int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf, + size_t size) +{ + return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size); +} + +static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes) +{ + struct tb_nvm *nvm = priv; + struct tb_retimer *rt = tb_to_retimer(nvm->dev); + int ret; + + pm_runtime_get_sync(&rt->dev); + + if (!mutex_trylock(&rt->tb->lock)) { + ret = restart_syscall(); + goto out; + } + + ret = tb_retimer_nvm_read(rt, offset, val, bytes); + mutex_unlock(&rt->tb->lock); + +out: + pm_runtime_mark_last_busy(&rt->dev); + pm_runtime_put_autosuspend(&rt->dev); + + return ret; +} + +static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes) +{ + struct tb_nvm *nvm = priv; + struct tb_retimer *rt = tb_to_retimer(nvm->dev); + int ret = 0; + + if (!mutex_trylock(&rt->tb->lock)) + return restart_syscall(); + + ret = tb_nvm_write_buf(nvm, offset, val, bytes); + mutex_unlock(&rt->tb->lock); + + return ret; +} + +static int tb_retimer_nvm_add(struct tb_retimer *rt) +{ + struct tb_nvm *nvm; + int ret; + + nvm = tb_nvm_alloc(&rt->dev); + if (IS_ERR(nvm)) { + ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm); + goto err_nvm; + } + + ret = tb_nvm_read_version(nvm); + if (ret) + goto err_nvm; + + ret = tb_nvm_add_active(nvm, nvm_read); + if (ret) + goto err_nvm; + + ret = tb_nvm_add_non_active(nvm, nvm_write); + if (ret) + goto err_nvm; + + rt->nvm = nvm; + return 0; + +err_nvm: + dev_dbg(&rt->dev, "NVM upgrade disabled\n"); + if (!IS_ERR(nvm)) + tb_nvm_free(nvm); + + return ret; +} + +static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt) +{ + unsigned int image_size; + const u8 *buf; + int ret; + + ret = tb_nvm_validate(rt->nvm); + if (ret) + return ret; + + buf = rt->nvm->buf_data_start; + image_size = rt->nvm->buf_data_size; + + ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf, + image_size); + if (ret) + return ret; + + rt->nvm->flushed = true; + return 0; +} + +static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only) +{ + u32 status; + int ret; + + if (auth_only) { + ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0); + if (ret) + return ret; + } + + ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index); + if (ret) + return ret; + + usleep_range(100, 150); + + /* + * Check the status now if we still can access the retimer. It + * is expected that the below fails. + */ + ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index, + &status); + if (!ret) { + rt->auth_status = status; + return status ? 
-EINVAL : 0; + } + + return 0; +} + +static ssize_t device_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + + return sysfs_emit(buf, "%#x\n", rt->device); +} +static DEVICE_ATTR_RO(device); + +static ssize_t nvm_authenticate_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + int ret; + + if (!mutex_trylock(&rt->tb->lock)) + return restart_syscall(); + + if (!rt->nvm) + ret = -EAGAIN; + else if (rt->no_nvm_upgrade) + ret = -EOPNOTSUPP; + else + ret = sysfs_emit(buf, "%#x\n", rt->auth_status); + + mutex_unlock(&rt->tb->lock); + + return ret; +} + +static ssize_t nvm_authenticate_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + int val, ret; + + pm_runtime_get_sync(&rt->dev); + + if (!mutex_trylock(&rt->tb->lock)) { + ret = restart_syscall(); + goto exit_rpm; + } + + if (!rt->nvm) { + ret = -EAGAIN; + goto exit_unlock; + } + + ret = kstrtoint(buf, 10, &val); + if (ret) + goto exit_unlock; + + /* Always clear status */ + rt->auth_status = 0; + + if (val) { + if (val == AUTHENTICATE_ONLY) { + ret = tb_retimer_nvm_authenticate(rt, true); + } else { + if (!rt->nvm->flushed) { + if (!rt->nvm->buf) { + ret = -EINVAL; + goto exit_unlock; + } + + ret = tb_retimer_nvm_validate_and_write(rt); + if (ret || val == WRITE_ONLY) + goto exit_unlock; + } + if (val == WRITE_AND_AUTHENTICATE) + ret = tb_retimer_nvm_authenticate(rt, false); + } + } + +exit_unlock: + mutex_unlock(&rt->tb->lock); +exit_rpm: + pm_runtime_mark_last_busy(&rt->dev); + pm_runtime_put_autosuspend(&rt->dev); + + if (ret) + return ret; + return count; +} +static DEVICE_ATTR_RW(nvm_authenticate); + +static ssize_t nvm_version_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + int ret; + + if (!mutex_trylock(&rt->tb->lock)) + return restart_syscall(); + + if (!rt->nvm) + ret = -EAGAIN; + else + ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor); + + mutex_unlock(&rt->tb->lock); + return ret; +} +static DEVICE_ATTR_RO(nvm_version); + +static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + + return sysfs_emit(buf, "%#x\n", rt->vendor); +} +static DEVICE_ATTR_RO(vendor); + +static struct attribute *retimer_attrs[] = { + &dev_attr_device.attr, + &dev_attr_nvm_authenticate.attr, + &dev_attr_nvm_version.attr, + &dev_attr_vendor.attr, + NULL +}; + +static const struct attribute_group retimer_group = { + .attrs = retimer_attrs, +}; + +static const struct attribute_group *retimer_groups[] = { + &retimer_group, + NULL +}; + +static void tb_retimer_release(struct device *dev) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + + kfree(rt); +} + +struct device_type tb_retimer_type = { + .name = "thunderbolt_retimer", + .groups = retimer_groups, + .release = tb_retimer_release, +}; + +static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status) +{ + struct tb_retimer *rt; + u32 vendor, device; + int ret; + + ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor, + sizeof(vendor)); + if (ret) { + if (ret != -ENODEV) + tb_port_warn(port, "failed read retimer VendorId: %d\n", ret); + return ret; + } + + ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device, + sizeof(device)); + if (ret) { + if (ret != -ENODEV) + 
tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
+		return ret;
+	}
+
+	if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
+		tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
+			     vendor);
+		return -EOPNOTSUPP;
+	}
+
+	/*
+	 * Check that it supports NVM operations. If not then don't add
+	 * the device at all.
+	 */
+	ret = usb4_port_retimer_nvm_sector_size(port, index);
+	if (ret < 0)
+		return ret;
+
+	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+	if (!rt)
+		return -ENOMEM;
+
+	rt->index = index;
+	rt->vendor = vendor;
+	rt->device = device;
+	rt->auth_status = auth_status;
+	rt->port = port;
+	rt->tb = port->sw->tb;
+
+	rt->dev.parent = &port->usb4->dev;
+	rt->dev.bus = &tb_bus_type;
+	rt->dev.type = &tb_retimer_type;
+	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
+		     port->port, index);
+
+	ret = device_register(&rt->dev);
+	if (ret) {
+		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
+		put_device(&rt->dev);
+		return ret;
+	}
+
+	ret = tb_retimer_nvm_add(rt);
+	if (ret) {
+		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
+		device_unregister(&rt->dev);
+		return ret;
+	}
+
+	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
+		 rt->vendor, rt->device);
+
+	pm_runtime_no_callbacks(&rt->dev);
+	pm_runtime_set_active(&rt->dev);
+	pm_runtime_enable(&rt->dev);
+	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
+	pm_runtime_mark_last_busy(&rt->dev);
+	pm_runtime_use_autosuspend(&rt->dev);
+
+	return 0;
+}
+
+static void tb_retimer_remove(struct tb_retimer *rt)
+{
+	dev_info(&rt->dev, "retimer disconnected\n");
+	tb_nvm_free(rt->nvm);
+	device_unregister(&rt->dev);
+}
+
+struct tb_retimer_lookup {
+	const struct tb_port *port;
+	u8 index;
+};
+
+static int retimer_match(struct device *dev, void *data)
+{
+	const struct tb_retimer_lookup *lookup = data;
+	struct tb_retimer *rt = tb_to_retimer(dev);
+
+	return rt && rt->port == lookup->port && rt->index == lookup->index;
+}
+
+static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
+{
+	struct tb_retimer_lookup lookup = { .port = port, .index = index };
+	struct device *dev;
+
+	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
+	if (dev)
+		return tb_to_retimer(dev);
+
+	return NULL;
+}
+
+/**
+ * tb_retimer_scan() - Scan for on-board retimers under port
+ * @port: USB4 port to scan
+ * @add: If true, also registers found retimers
+ *
+ * Brings the sideband into a state where retimers can be accessed and
+ * then tries to enumerate on-board retimers connected to @port. Found
+ * retimers are registered as children of @port if @add is set. Does
+ * not scan for cable retimers for now.
+ */
+int tb_retimer_scan(struct tb_port *port, bool add)
+{
+	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
+	int ret, i, last_idx = 0;
+	struct usb4_port *usb4;
+
+	usb4 = port->usb4;
+	if (!usb4)
+		return 0;
+
+	pm_runtime_get_sync(&usb4->dev);
+
+	/*
+	 * Send broadcast RT to make sure retimer indices facing this
+	 * port are set.
+	 */
+	ret = usb4_port_enumerate_retimers(port);
+	if (ret)
+		goto out;
+
+	/*
+	 * Enable sideband channel for each retimer. We can do this
+	 * regardless of whether there is a device connected or not.
+	 */
+	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
+		usb4_port_retimer_set_inbound_sbtx(port, i);
+
+	/*
+	 * Before doing anything else, read the authentication status.
+	 * If the retimer has it set, store it for the new retimer
+	 * device instance.
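+	 * (For indices without a retimer the read fails and the
+	 * corresponding status[] entry stays zero, so nothing is
+	 * carried over to tb_retimer_add() in that case.)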
+ */ + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) + usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); + + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) { + /* + * Last retimer is true only for the last on-board + * retimer (the one connected directly to the Type-C + * port). + */ + ret = usb4_port_retimer_is_last(port, i); + if (ret > 0) + last_idx = i; + else if (ret < 0) + break; + } + + if (!last_idx) { + ret = 0; + goto out; + } + + /* Add on-board retimers if they do not exist already */ + for (i = 1; i <= last_idx; i++) { + struct tb_retimer *rt; + + rt = tb_port_find_retimer(port, i); + if (rt) { + put_device(&rt->dev); + } else if (add) { + ret = tb_retimer_add(port, i, status[i]); + if (ret && ret != -EOPNOTSUPP) + break; + } + } + +out: + pm_runtime_mark_last_busy(&usb4->dev); + pm_runtime_put_autosuspend(&usb4->dev); + + return ret; +} + +static int remove_retimer(struct device *dev, void *data) +{ + struct tb_retimer *rt = tb_to_retimer(dev); + struct tb_port *port = data; + + if (rt && rt->port == port) + tb_retimer_remove(rt); + return 0; +} + +/** + * tb_retimer_remove_all() - Remove all retimers under port + * @port: USB4 port whose retimers to remove + * + * This removes all previously added retimers under @port. + */ +void tb_retimer_remove_all(struct tb_port *port) +{ + struct usb4_port *usb4; + + usb4 = port->usb4; + if (usb4) + device_for_each_child_reverse(&usb4->dev, port, + remove_retimer); +} diff --git a/drivers/thunderbolt/sb_regs.h b/drivers/thunderbolt/sb_regs.h new file mode 100644 index 000000000000..5185cf3e4d97 --- /dev/null +++ b/drivers/thunderbolt/sb_regs.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * USB4 port sideband registers found on routers and retimers + * + * Copyright (C) 2020, Intel Corporation + * Authors: Mika Westerberg <mika.westerberg@linux.intel.com> + * Rajmohan Mani <rajmohan.mani@intel.com> + */ + +#ifndef _SB_REGS +#define _SB_REGS + +#define USB4_SB_VENDOR_ID 0x00 +#define USB4_SB_PRODUCT_ID 0x01 +#define USB4_SB_OPCODE 0x08 + +enum usb4_sb_opcode { + USB4_SB_OPCODE_ERR = 0x20525245, /* "ERR " */ + USB4_SB_OPCODE_ONS = 0x444d4321, /* "!CMD" */ + USB4_SB_OPCODE_ROUTER_OFFLINE = 0x4e45534c, /* "LSEN" */ + USB4_SB_OPCODE_ENUMERATE_RETIMERS = 0x4d554e45, /* "ENUM" */ + USB4_SB_OPCODE_SET_INBOUND_SBTX = 0x5055534c, /* "LSUP" */ + USB4_SB_OPCODE_QUERY_LAST_RETIMER = 0x5453414c, /* "LAST" */ + USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE = 0x53534e47, /* "GNSS" */ + USB4_SB_OPCODE_NVM_SET_OFFSET = 0x53504f42, /* "BOPS" */ + USB4_SB_OPCODE_NVM_BLOCK_WRITE = 0x574b4c42, /* "BLKW" */ + USB4_SB_OPCODE_NVM_AUTH_WRITE = 0x48545541, /* "AUTH" */ + USB4_SB_OPCODE_NVM_READ = 0x52524641, /* "AFRR" */ + USB4_SB_OPCODE_READ_LANE_MARGINING_CAP = 0x50434452, /* "RDCP" */ + USB4_SB_OPCODE_RUN_HW_LANE_MARGINING = 0x474d4852, /* "RHMG" */ + USB4_SB_OPCODE_RUN_SW_LANE_MARGINING = 0x474d5352, /* "RSMG" */ + USB4_SB_OPCODE_READ_SW_MARGIN_ERR = 0x57534452, /* "RDSW" */ +}; + +#define USB4_SB_METADATA 0x09 +#define USB4_SB_METADATA_NVM_AUTH_WRITE_MASK GENMASK(5, 0) +#define USB4_SB_DATA 0x12 + +/* USB4_SB_OPCODE_READ_LANE_MARGINING_CAP */ +#define USB4_MARGIN_CAP_0_MODES_HW BIT(0) +#define USB4_MARGIN_CAP_0_MODES_SW BIT(1) +#define USB4_MARGIN_CAP_0_2_LANES BIT(2) +#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_MASK GENMASK(4, 3) +#define USB4_MARGIN_CAP_0_VOLTAGE_INDP_SHIFT 3 +#define USB4_MARGIN_CAP_0_VOLTAGE_MIN 0x0 +#define USB4_MARGIN_CAP_0_VOLTAGE_HL 0x1 +#define USB4_MARGIN_CAP_0_VOLTAGE_BOTH 0x2 +#define USB4_MARGIN_CAP_0_TIME BIT(5) 
+#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_MASK	GENMASK(12, 6)
+#define USB4_MARGIN_CAP_0_VOLTAGE_STEPS_SHIFT	6
+#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_MASK	GENMASK(18, 13)
+#define USB4_MARGIN_CAP_0_MAX_VOLTAGE_OFFSET_SHIFT	13
+#define USB4_MARGIN_CAP_1_TIME_DESTR	BIT(8)
+#define USB4_MARGIN_CAP_1_TIME_INDP_MASK	GENMASK(10, 9)
+#define USB4_MARGIN_CAP_1_TIME_INDP_SHIFT	9
+#define USB4_MARGIN_CAP_1_TIME_MIN	0x0
+#define USB4_MARGIN_CAP_1_TIME_LR	0x1
+#define USB4_MARGIN_CAP_1_TIME_BOTH	0x2
+#define USB4_MARGIN_CAP_1_TIME_STEPS_MASK	GENMASK(15, 11)
+#define USB4_MARGIN_CAP_1_TIME_STEPS_SHIFT	11
+#define USB4_MARGIN_CAP_1_TIME_OFFSET_MASK	GENMASK(20, 16)
+#define USB4_MARGIN_CAP_1_TIME_OFFSET_SHIFT	16
+#define USB4_MARGIN_CAP_1_MIN_BER_MASK	GENMASK(25, 21)
+#define USB4_MARGIN_CAP_1_MIN_BER_SHIFT	21
+#define USB4_MARGIN_CAP_1_MAX_BER_MASK	GENMASK(30, 26)
+#define USB4_MARGIN_CAP_1_MAX_BER_SHIFT	26
+
+/* USB4_SB_OPCODE_RUN_HW_LANE_MARGINING */
+#define USB4_MARGIN_HW_TIME	BIT(3)
+#define USB4_MARGIN_HW_RH	BIT(4)
+#define USB4_MARGIN_HW_BER_MASK	GENMASK(9, 5)
+#define USB4_MARGIN_HW_BER_SHIFT	5
+
+/* Applicable to all margin values */
+#define USB4_MARGIN_HW_RES_1_MARGIN_MASK	GENMASK(6, 0)
+#define USB4_MARGIN_HW_RES_1_EXCEEDS	BIT(7)
+/* Different lane margin shifts */
+#define USB4_MARGIN_HW_RES_1_L0_LL_MARGIN_SHIFT	8
+#define USB4_MARGIN_HW_RES_1_L1_RH_MARGIN_SHIFT	16
+#define USB4_MARGIN_HW_RES_1_L1_LL_MARGIN_SHIFT	24
+
+/* USB4_SB_OPCODE_RUN_SW_LANE_MARGINING */
+#define USB4_MARGIN_SW_TIME	BIT(3)
+#define USB4_MARGIN_SW_RH	BIT(4)
+#define USB4_MARGIN_SW_COUNTER_MASK	GENMASK(14, 13)
+#define USB4_MARGIN_SW_COUNTER_SHIFT	13
+#define USB4_MARGIN_SW_COUNTER_NOP	0x0
+#define USB4_MARGIN_SW_COUNTER_CLEAR	0x1
+#define USB4_MARGIN_SW_COUNTER_START	0x2
+#define USB4_MARGIN_SW_COUNTER_STOP	0x3
+
+#endif
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index a2ce99051c51..60da5c23ccaf 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -13,28 +13,22 @@
 #include <linux/sched/signal.h>
 #include <linux/sizes.h>
 #include <linux/slab.h>
-#include <linux/vmalloc.h>
+#include <linux/module.h>
 
 #include "tb.h"
 
 /* Switch NVM support */
 
-#define NVM_DEVID		0x05
-#define NVM_VERSION		0x08
-#define NVM_CSS			0x10
-#define NVM_FLASH_SIZE		0x45
-
-#define NVM_MIN_SIZE		SZ_32K
-#define NVM_MAX_SIZE		SZ_512K
-
-static DEFINE_IDA(nvm_ida);
-
 struct nvm_auth_status {
 	struct list_head list;
 	uuid_t uuid;
 	u32 status;
 };
 
+static bool clx_enabled = true;
+module_param_named(clx, clx_enabled, bool, 0444);
+MODULE_PARM_DESC(clx, "allow low power states on the high-speed lanes (default: true)");
+
 /*
  * Hold NVM authentication failure status per switch. This information
  * needs to stay around even when the switch gets power cycled so we
@@ -106,66 +100,30 @@ static void nvm_clear_auth_status(const struct tb_switch *sw)
 
 static int nvm_validate_and_write(struct tb_switch *sw)
 {
-	unsigned int image_size, hdr_size;
-	const u8 *buf = sw->nvm->buf;
-	u16 ds_size;
+	unsigned int image_size;
+	const u8 *buf;
 	int ret;
 
-	if (!buf)
-		return -EINVAL;
-
-	image_size = sw->nvm->buf_data_size;
-	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
-		return -EINVAL;
-
-	/*
-	 * FARB pointer must point inside the image and must at least
-	 * contain parts of the digital section we will be reading here.
- */ - hdr_size = (*(u32 *)buf) & 0xffffff; - if (hdr_size + NVM_DEVID + 2 >= image_size) - return -EINVAL; - - /* Digital section start should be aligned to 4k page */ - if (!IS_ALIGNED(hdr_size, SZ_4K)) - return -EINVAL; - - /* - * Read digital section size and check that it also fits inside - * the image. - */ - ds_size = *(u16 *)(buf + hdr_size); - if (ds_size >= image_size) - return -EINVAL; - - if (!sw->safe_mode) { - u16 device_id; + ret = tb_nvm_validate(sw->nvm); + if (ret) + return ret; - /* - * Make sure the device ID in the image matches the one - * we read from the switch config space. - */ - device_id = *(u16 *)(buf + hdr_size + NVM_DEVID); - if (device_id != sw->config.device_id) - return -EINVAL; - - if (sw->generation < 3) { - /* Write CSS headers first */ - ret = dma_port_flash_write(sw->dma_port, - DMA_PORT_CSS_ADDRESS, buf + NVM_CSS, - DMA_PORT_CSS_MAX_SIZE); - if (ret) - return ret; - } + ret = tb_nvm_write_headers(sw->nvm); + if (ret) + return ret; - /* Skip headers in the image */ - buf += hdr_size; - image_size -= hdr_size; - } + buf = sw->nvm->buf_data_start; + image_size = sw->nvm->buf_data_size; if (tb_switch_is_usb4(sw)) - return usb4_switch_nvm_write(sw, 0, buf, image_size); - return dma_port_flash_write(sw->dma_port, 0, buf, image_size); + ret = usb4_switch_nvm_write(sw, 0, buf, image_size); + else + ret = dma_port_flash_write(sw->dma_port, 0, buf, image_size); + if (ret) + return ret; + + sw->nvm->flushed = true; + return 0; } static int nvm_authenticate_host_dma_port(struct tb_switch *sw) @@ -263,7 +221,7 @@ static void nvm_authenticate_start_dma_port(struct tb_switch *sw) * itself. To be on the safe side keep the root port in D0 during * the whole upgrade process. */ - root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); + root_port = pcie_find_root_port(sw->tb->nhi->pdev); if (root_port) pm_runtime_get_noresume(&root_port->dev); } @@ -272,7 +230,7 @@ static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) { struct pci_dev *root_port; - root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); + root_port = pcie_find_root_port(sw->tb->nhi->pdev); if (root_port) pm_runtime_put(&root_port->dev); } @@ -300,21 +258,23 @@ static inline bool nvm_upgradeable(struct tb_switch *sw) return nvm_readable(sw); } -static inline int nvm_read(struct tb_switch *sw, unsigned int address, - void *buf, size_t size) -{ - if (tb_switch_is_usb4(sw)) - return usb4_switch_nvm_read(sw, address, buf, size); - return dma_port_flash_read(sw->dma_port, address, buf, size); -} - -static int nvm_authenticate(struct tb_switch *sw) +static int nvm_authenticate(struct tb_switch *sw, bool auth_only) { int ret; - if (tb_switch_is_usb4(sw)) + if (tb_switch_is_usb4(sw)) { + if (auth_only) { + ret = usb4_switch_nvm_set_offset(sw, 0); + if (ret) + return ret; + } + sw->nvm->authenticating = true; return usb4_switch_nvm_authenticate(sw); + } else if (auth_only) { + return -EOPNOTSUPP; + } + sw->nvm->authenticating = true; if (!tb_route(sw)) { nvm_authenticate_start_dma_port(sw); ret = nvm_authenticate_host_dma_port(sw); @@ -325,10 +285,29 @@ static int nvm_authenticate(struct tb_switch *sw) return ret; } -static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, - size_t bytes) +/** + * tb_switch_nvm_read() - Read router NVM + * @sw: Router whose NVM to read + * @address: Start address on the NVM + * @buf: Buffer where the read data is copied + * @size: Size of the buffer in bytes + * + * Reads from router NVM and returns the requested data in @buf. 
Locking + * is up to the caller. Returns %0 in success and negative errno in case + * of failure. + */ +int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_nvm_read(sw, address, buf, size); + return dma_port_flash_read(sw->dma_port, address, buf, size); +} + +static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes) { - struct tb_switch *sw = priv; + struct tb_nvm *nvm = priv; + struct tb_switch *sw = tb_to_switch(nvm->dev); int ret; pm_runtime_get_sync(&sw->dev); @@ -338,7 +317,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, goto out; } - ret = nvm_read(sw, offset, val, bytes); + ret = tb_switch_nvm_read(sw, offset, val, bytes); mutex_unlock(&sw->tb->lock); out: @@ -348,17 +327,11 @@ out: return ret; } -static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val, - size_t bytes) +static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes) { - return -EPERM; -} - -static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, - size_t bytes) -{ - struct tb_switch *sw = priv; - int ret = 0; + struct tb_nvm *nvm = priv; + struct tb_switch *sw = tb_to_switch(nvm->dev); + int ret; if (!mutex_trylock(&sw->tb->lock)) return restart_syscall(); @@ -369,79 +342,29 @@ static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val, * locally here and handle the special cases when the user asks * us to authenticate the image. */ - if (!sw->nvm->buf) { - sw->nvm->buf = vmalloc(NVM_MAX_SIZE); - if (!sw->nvm->buf) { - ret = -ENOMEM; - goto unlock; - } - } - - sw->nvm->buf_data_size = offset + bytes; - memcpy(sw->nvm->buf + offset, val, bytes); - -unlock: + ret = tb_nvm_write_buf(nvm, offset, val, bytes); mutex_unlock(&sw->tb->lock); return ret; } -static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id, - size_t size, bool active) -{ - struct nvmem_config config; - - memset(&config, 0, sizeof(config)); - - if (active) { - config.name = "nvm_active"; - config.reg_read = tb_switch_nvm_read; - config.read_only = true; - } else { - config.name = "nvm_non_active"; - config.reg_read = tb_switch_nvm_no_read; - config.reg_write = tb_switch_nvm_write; - config.root_only = true; - } - - config.id = id; - config.stride = 4; - config.word_size = 4; - config.size = size; - config.dev = &sw->dev; - config.owner = THIS_MODULE; - config.priv = sw; - - return nvmem_register(&config); -} - static int tb_switch_nvm_add(struct tb_switch *sw) { - struct nvmem_device *nvm_dev; - struct tb_switch_nvm *nvm; - u32 val; + struct tb_nvm *nvm; int ret; if (!nvm_readable(sw)) return 0; - /* - * The NVM format of non-Intel hardware is not known so - * currently restrict NVM upgrade for Intel hardware. We may - * relax this in the future when we learn other NVM formats. - */ - if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) { - dev_info(&sw->dev, - "NVM format of vendor %#x is not known, disabling NVM upgrade\n", - sw->config.vendor_id); - return 0; + nvm = tb_nvm_alloc(&sw->dev); + if (IS_ERR(nvm)) { + ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 
0 : PTR_ERR(nvm); + goto err_nvm; } - nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); - if (!nvm) - return -ENOMEM; - - nvm->id = ida_simple_get(&nvm_ida, 0, 0, GFP_KERNEL); + ret = tb_nvm_read_version(nvm); + if (ret) + goto err_nvm; /* * If the switch is in safe-mode the only accessible portion of @@ -449,56 +372,32 @@ static int tb_switch_nvm_add(struct tb_switch *sw) * write new functional NVM. */ if (!sw->safe_mode) { - u32 nvm_size, hdr_size; - - ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val)); + ret = tb_nvm_add_active(nvm, nvm_read); if (ret) - goto err_ida; - - hdr_size = sw->generation < 3 ? SZ_8K : SZ_16K; - nvm_size = (SZ_1M << (val & 7)) / 8; - nvm_size = (nvm_size - hdr_size) / 2; - - ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val)); - if (ret) - goto err_ida; - - nvm->major = val >> 16; - nvm->minor = val >> 8; - - nvm_dev = register_nvmem(sw, nvm->id, nvm_size, true); - if (IS_ERR(nvm_dev)) { - ret = PTR_ERR(nvm_dev); - goto err_ida; - } - nvm->active = nvm_dev; + goto err_nvm; } if (!sw->no_nvm_upgrade) { - nvm_dev = register_nvmem(sw, nvm->id, NVM_MAX_SIZE, false); - if (IS_ERR(nvm_dev)) { - ret = PTR_ERR(nvm_dev); - goto err_nvm_active; - } - nvm->non_active = nvm_dev; + ret = tb_nvm_add_non_active(nvm, nvm_write); + if (ret) + goto err_nvm; } sw->nvm = nvm; return 0; -err_nvm_active: - if (nvm->active) - nvmem_unregister(nvm->active); -err_ida: - ida_simple_remove(&nvm_ida, nvm->id); - kfree(nvm); +err_nvm: + tb_sw_dbg(sw, "NVM upgrade disabled\n"); + sw->no_nvm_upgrade = true; + if (!IS_ERR(nvm)) + tb_nvm_free(nvm); return ret; } static void tb_switch_nvm_remove(struct tb_switch *sw) { - struct tb_switch_nvm *nvm; + struct tb_nvm *nvm; nvm = sw->nvm; sw->nvm = NULL; @@ -510,18 +409,12 @@ static void tb_switch_nvm_remove(struct tb_switch *sw) if (!nvm->authenticating) nvm_clear_auth_status(sw); - if (nvm->non_active) - nvmem_unregister(nvm->non_active); - if (nvm->active) - nvmem_unregister(nvm->active); - ida_simple_remove(&nvm_ida, nvm->id); - vfree(nvm->buf); - kfree(nvm); + tb_nvm_free(nvm); } /* port utility functions */ -static const char *tb_port_type(struct tb_regs_port_header *port) +static const char *tb_port_type(const struct tb_regs_port_header *port) { switch (port->type >> 16) { case 0: @@ -550,27 +443,32 @@ static const char *tb_port_type(struct tb_regs_port_header *port) } } -static void tb_dump_port(struct tb *tb, struct tb_regs_port_header *port) +static void tb_dump_port(struct tb *tb, const struct tb_port *port) { + const struct tb_regs_port_header *regs = &port->config; + tb_dbg(tb, " Port %d: %x:%x (Revision: %d, TB Version: %d, Type: %s (%#x))\n", - port->port_number, port->vendor_id, port->device_id, - port->revision, port->thunderbolt_version, tb_port_type(port), - port->type); + regs->port_number, regs->vendor_id, regs->device_id, + regs->revision, regs->thunderbolt_version, tb_port_type(regs), + regs->type); tb_dbg(tb, " Max hop id (in/out): %d/%d\n", - port->max_in_hop_id, port->max_out_hop_id); - tb_dbg(tb, " Max counters: %d\n", port->max_counters); - tb_dbg(tb, " NFC Credits: %#x\n", port->nfc_credits); + regs->max_in_hop_id, regs->max_out_hop_id); + tb_dbg(tb, " Max counters: %d\n", regs->max_counters); + tb_dbg(tb, " NFC Credits: %#x\n", regs->nfc_credits); + tb_dbg(tb, " Credits (total/control): %u/%u\n", port->total_credits, + port->ctl_credits); } /** * tb_port_state() - get connectedness state of a port + * @port: the port to check * * The port must have a TB_CAP_PHY (i.e. it should be a real port). 
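 *
 * Example (sketch): skipping ports with nothing plugged in:
 *
 *	if (tb_port_state(port) == TB_PORT_UNPLUGGED)
 *		continue;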
* * Return: Returns an enum tb_port_state on success or an error code on failure. */ -static int tb_port_state(struct tb_port *port) +int tb_port_state(struct tb_port *port) { struct tb_cap_phy phy; int res; @@ -586,6 +484,8 @@ static int tb_port_state(struct tb_port *port) /** * tb_wait_for_port() - wait for a port to become ready + * @port: Port to wait + * @wait_if_unplugged: Wait also when port is unplugged * * Wait up to 1 second for a port to reach state TB_PORT_UP. If * wait_if_unplugged is set then we also wait if the port is in state @@ -650,6 +550,8 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged) /** * tb_port_add_nfc_credits() - add/remove non flow controlled credits to port + * @port: Port to add/remove NFC credits + * @credits: Credits to add/remove * * Change the number of NFC credits allocated to @port by @credits. To remove * NFC credits pass a negative amount of credits. @@ -663,7 +565,17 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits) if (credits == 0 || port->sw->is_unplugged) return 0; + /* + * USB4 restricts programming NFC buffers to lane adapters only + * so skip other ports. + */ + if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port)) + return 0; + nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; + if (credits < 0) + credits = max_t(int, -nfc_credits, credits); + nfc_credits += credits; tb_port_dbg(port, "adding %d NFC credits to %lu", credits, @@ -677,29 +589,9 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits) } /** - * tb_port_set_initial_credits() - Set initial port link credits allocated - * @port: Port to set the initial credits - * @credits: Number of credits to to allocate - * - * Set initial credits value to be used for ingress shared buffering. - */ -int tb_port_set_initial_credits(struct tb_port *port, u32 credits) -{ - u32 data; - int ret; - - ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1); - if (ret) - return ret; - - data &= ~ADP_CS_5_LCA_MASK; - data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK; - - return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1); -} - -/** * tb_port_clear_counter() - clear a counter in TB_CFG_COUNTER + * @port: Port whose counters to clear + * @counter: Counter index to clear * * Return: Returns 0 on success or an error code on failure. */ @@ -728,7 +620,57 @@ int tb_port_unlock(struct tb_port *port) return 0; } +static int __tb_port_enable(struct tb_port *port, bool enable) +{ + int ret; + u32 phy; + + if (!tb_port_is_null(port)) + return -EINVAL; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (enable) + phy &= ~LANE_ADP_CS_1_LD; + else + phy |= LANE_ADP_CS_1_LD; + + + ret = tb_port_write(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + tb_port_dbg(port, "lane %sabled\n", enable ? "en" : "dis"); + return 0; +} + /** + * tb_port_enable() - Enable lane adapter + * @port: Port to enable (can be %NULL) + * + * This is used for lane 0 and 1 adapters to enable it. + */ +int tb_port_enable(struct tb_port *port) +{ + return __tb_port_enable(port, true); +} + +/** + * tb_port_disable() - Disable lane adapter + * @port: Port to disable (can be %NULL) + * + * This is used for lane 0 and 1 adapters to disable it. + */ +int tb_port_disable(struct tb_port *port) +{ + return __tb_port_enable(port, false); +} + +/* * tb_init_port() - initialize a port * * This is a helper method for tb_switch_alloc. 
Does not check or initialize
@@ -741,18 +683,25 @@ static int tb_init_port(struct tb_port *port)
 	int res;
 	int cap;
 
+	INIT_LIST_HEAD(&port->list);
+
+	/* Control adapter does not have configuration space */
+	if (!port->port)
+		return 0;
+
 	res = tb_port_read(port, &port->config, TB_CFG_PORT, 0, 8);
 	if (res) {
 		if (res == -ENODEV) {
 			tb_dbg(port->sw->tb, " Port %d: not implemented\n",
 			       port->port);
+			port->disabled = true;
 			return 0;
 		}
 		return res;
 	}
 
 	/* Port 0 is the switch itself and has no PHY. */
-	if (port->config.type == TB_TYPE_PORT && port->port != 0) {
+	if (port->config.type == TB_TYPE_PORT) {
 		cap = tb_port_find_cap(port, TB_PORT_CAP_PHY);
 
 		if (cap > 0)
@@ -763,23 +712,33 @@ static int tb_init_port(struct tb_port *port)
 		cap = tb_port_find_cap(port, TB_PORT_CAP_USB4);
 		if (cap > 0)
 			port->cap_usb4 = cap;
-	} else if (port->port != 0) {
+
+		/*
+		 * For USB4 ports the buffers allocated for the control
+		 * path can be read from the path config space. For
+		 * legacy devices we use a hard-coded value.
+		 */
+		if (tb_switch_is_usb4(port->sw)) {
+			struct tb_regs_hop hop;
+
+			if (!tb_port_read(port, &hop, TB_CFG_HOPS, 0, 2))
+				port->ctl_credits = hop.initial_credits;
+		}
+		if (!port->ctl_credits)
+			port->ctl_credits = 2;
+
+	} else {
 		cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP);
 		if (cap > 0)
 			port->cap_adap = cap;
 	}
 
-	tb_dump_port(port->sw->tb, &port->config);
+	port->total_credits =
+		(port->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >>
+		ADP_CS_4_TOTAL_BUFFERS_SHIFT;
 
-	/* Control port does not need HopID allocation */
-	if (port->port) {
-		ida_init(&port->in_hopids);
-		ida_init(&port->out_hopids);
-	}
-
-	INIT_LIST_HEAD(&port->list);
+	tb_dump_port(port->sw->tb, port);
 	return 0;
-
 }
 
 static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
@@ -796,8 +755,11 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid,
 		ida = &port->out_hopids;
 	}
 
-	/* HopIDs 0-7 are reserved */
-	if (min_hopid < TB_PATH_MIN_HOPID)
+	/*
+	 * NHI can use HopIDs 1-max; for other adapters HopIDs 0-7 are
+	 * reserved.
+	 */
+	if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID)
 		min_hopid = TB_PATH_MIN_HOPID;
 
 	if (max_hopid < 0 || max_hopid > port_max_hopid)
@@ -854,6 +816,13 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid)
 	ida_simple_remove(&port->out_hopids, hopid);
 }
 
+static inline bool tb_switch_is_reachable(const struct tb_switch *parent,
+					  const struct tb_switch *sw)
+{
+	u64 mask = (1ULL << parent->config.depth * 8) - 1;
+	return (tb_route(parent) & mask) == (tb_route(sw) & mask);
+}
+
 /**
  * tb_next_port_on_path() - Return next port for given port on a path
  * @start: Start port of the walk
@@ -883,12 +852,12 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 		return end;
 	}
 
-	if (start->sw->config.depth < end->sw->config.depth) {
+	if (tb_switch_is_reachable(prev->sw, end->sw)) {
+		next = tb_port_at(tb_route(end->sw), prev->sw);
+		/* Walk down the topology if next == prev */
 		if (prev->remote &&
-		    prev->remote->sw->config.depth > prev->sw->config.depth)
+		    (next == prev || next->dual_link_port == prev))
 			next = prev->remote;
-		else
-			next = tb_port_at(tb_route(end->sw), prev->sw);
 	} else {
 		if (tb_is_upstream_port(prev)) {
 			next = prev->remote;
@@ -905,10 +874,16 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end,
 		}
 	}
 
-	return next;
+	return next != prev ?
next : NULL; } -static int tb_port_get_link_speed(struct tb_port *port) +/** + * tb_port_get_link_speed() - Get current link speed + * @port: Port to check (USB4 or CIO) + * + * Returns link speed in Gb/s or negative errno in case of failure. + */ +int tb_port_get_link_speed(struct tb_port *port) { u32 val, speed; int ret; @@ -926,7 +901,14 @@ static int tb_port_get_link_speed(struct tb_port *port) return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 20 : 10; } -static int tb_port_get_link_width(struct tb_port *port) +/** + * tb_port_get_link_width() - Get current link width + * @port: Port to check (USB4 or CIO) + * + * Returns link width. Return values can be 1 (Single-Lane), 2 (Dual-Lane) + * or negative errno in case of failure. + */ +int tb_port_get_link_width(struct tb_port *port) { u32 val; int ret; @@ -962,7 +944,17 @@ static bool tb_port_is_width_supported(struct tb_port *port, int width) return !!(widths & width); } -static int tb_port_set_link_width(struct tb_port *port, unsigned int width) +/** + * tb_port_set_link_width() - Set target link width of the lane adapter + * @port: Lane adapter + * @width: Target link width (%1 or %2) + * + * Sets the target link width of the lane adapter to @width. Does not + * enable/disable lane bonding. For that call tb_port_set_lane_bonding(). + * + * Return: %0 in case of success and negative errno in case of error + */ +int tb_port_set_link_width(struct tb_port *port, unsigned int width) { u32 val; int ret; @@ -989,13 +981,71 @@ static int tb_port_set_link_width(struct tb_port *port, unsigned int width) return -EINVAL; } - val |= LANE_ADP_CS_1_LB; - return tb_port_write(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1); } -static int tb_port_lane_bonding_enable(struct tb_port *port) +/** + * tb_port_set_lane_bonding() - Enable/disable lane bonding + * @port: Lane adapter + * @bonding: enable/disable bonding + * + * Enables or disables lane bonding. This should be called after target + * link width has been set (tb_port_set_link_width()). Note in most + * cases one should use tb_port_lane_bonding_enable() instead to enable + * lane bonding. + * + * As a side effect sets @port->bonding accordingly (and does the same + * for lane 1 too). + * + * Return: %0 in case of success and negative errno in case of error + */ +int tb_port_set_lane_bonding(struct tb_port *port, bool bonding) +{ + u32 val; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (bonding) + val |= LANE_ADP_CS_1_LB; + else + val &= ~LANE_ADP_CS_1_LB; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + /* + * When lane 0 bonding is set it will affect lane 1 too so + * update both. + */ + port->bonded = bonding; + port->dual_link_port->bonded = bonding; + + return 0; +} + +/** + * tb_port_lane_bonding_enable() - Enable bonding on port + * @port: port to enable + * + * Enable bonding by setting the link width of the port and the other + * port in case of dual link port. Does not wait for the link to + * actually reach the bonded state so caller needs to call + * tb_port_wait_for_link_width() before enabling any paths through the + * link to make sure the link is in expected state. 
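+ *
+ * A bonding sketch (mirrors the flow used by the core; @up/@down and
+ * the 100 ms timeout are illustrative):
+ *
+ *	ret = tb_port_lane_bonding_enable(up);
+ *	if (!ret) {
+ *		ret = tb_port_wait_for_link_width(down, 2, 100);
+ *		if (!ret)
+ *			ret = tb_port_update_credits(down);
+ *	}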
+ * + * Return: %0 in case of success and negative errno in case of error + */ +int tb_port_lane_bonding_enable(struct tb_port *port) { int ret; @@ -1007,34 +1057,291 @@ static int tb_port_lane_bonding_enable(struct tb_port *port) if (ret == 1) { ret = tb_port_set_link_width(port, 2); if (ret) - return ret; + goto err_lane0; } ret = tb_port_get_link_width(port->dual_link_port); if (ret == 1) { ret = tb_port_set_link_width(port->dual_link_port, 2); - if (ret) { - tb_port_set_link_width(port, 1); - return ret; - } + if (ret) + goto err_lane0; } - port->bonded = true; - port->dual_link_port->bonded = true; + ret = tb_port_set_lane_bonding(port, true); + if (ret) + goto err_lane1; return 0; + +err_lane1: + tb_port_set_link_width(port->dual_link_port, 1); +err_lane0: + tb_port_set_link_width(port, 1); + return ret; } -static void tb_port_lane_bonding_disable(struct tb_port *port) +/** + * tb_port_lane_bonding_disable() - Disable bonding on port + * @port: port to disable + * + * Disable bonding by setting the link width of the port and the + * other port in case of dual link port. + */ +void tb_port_lane_bonding_disable(struct tb_port *port) { - port->dual_link_port->bonded = false; - port->bonded = false; - + tb_port_set_lane_bonding(port, false); tb_port_set_link_width(port->dual_link_port, 1); tb_port_set_link_width(port, 1); } /** + * tb_port_wait_for_link_width() - Wait until link reaches specific width + * @port: Port to wait for + * @width: Expected link width (%1 or %2) + * @timeout_msec: Timeout in ms how long to wait + * + * Should be used after both ends of the link have been bonded (or + * bonding has been disabled) to wait until the link actually reaches + * the expected state. Returns %-ETIMEDOUT if the @width was not reached + * within the given timeout, %0 if it did. + */ +int tb_port_wait_for_link_width(struct tb_port *port, int width, + int timeout_msec) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + int ret; + + do { + ret = tb_port_get_link_width(port); + if (ret < 0) { + /* + * Sometimes we get port locked error when + * polling the lanes so we can ignore it and + * retry. + */ + if (ret != -EACCES) + return ret; + } else if (ret == width) { + return 0; + } + + usleep_range(1000, 2000); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +static int tb_port_do_update_credits(struct tb_port *port) +{ + u32 nfc_credits; + int ret; + + ret = tb_port_read(port, &nfc_credits, TB_CFG_PORT, ADP_CS_4, 1); + if (ret) + return ret; + + if (nfc_credits != port->config.nfc_credits) { + u32 total; + + total = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >> + ADP_CS_4_TOTAL_BUFFERS_SHIFT; + + tb_port_dbg(port, "total credits changed %u -> %u\n", + port->total_credits, total); + + port->config.nfc_credits = nfc_credits; + port->total_credits = total; + } + + return 0; +} + +/** + * tb_port_update_credits() - Re-read port total credits + * @port: Port to update + * + * After the link is bonded (or bonding was disabled) the port total + * credits may change, so this function needs to be called to re-read + * the credits. Updates also the second lane adapter. 
+ */ +int tb_port_update_credits(struct tb_port *port) +{ + int ret; + + ret = tb_port_do_update_credits(port); + if (ret) + return ret; + return tb_port_do_update_credits(port->dual_link_port); +} + +static int __tb_port_pm_secondary_set(struct tb_port *port, bool secondary) +{ + u32 phy; + int ret; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (secondary) + phy |= LANE_ADP_CS_1_PMS; + else + phy &= ~LANE_ADP_CS_1_PMS; + + return tb_port_write(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +static int tb_port_pm_secondary_enable(struct tb_port *port) +{ + return __tb_port_pm_secondary_set(port, true); +} + +static int tb_port_pm_secondary_disable(struct tb_port *port) +{ + return __tb_port_pm_secondary_set(port, false); +} + +/* Called for USB4 or Titan Ridge routers only */ +static bool tb_port_clx_supported(struct tb_port *port, unsigned int clx_mask) +{ + u32 val, mask = 0; + bool ret; + + /* Don't enable CLx in case of two single-lane links */ + if (!port->bonded && port->dual_link_port) + return false; + + /* Don't enable CLx in case of inter-domain link */ + if (port->xdomain) + return false; + + if (tb_switch_is_usb4(port->sw)) { + if (!usb4_port_clx_supported(port)) + return false; + } else if (!tb_lc_is_clx_supported(port)) { + return false; + } + + if (clx_mask & TB_CL1) { + /* CL0s and CL1 are enabled and supported together */ + mask |= LANE_ADP_CS_0_CL0S_SUPPORT | LANE_ADP_CS_0_CL1_SUPPORT; + } + if (clx_mask & TB_CL2) + mask |= LANE_ADP_CS_0_CL2_SUPPORT; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_0, 1); + if (ret) + return false; + + return !!(val & mask); +} + +static int __tb_port_clx_set(struct tb_port *port, enum tb_clx clx, bool enable) +{ + u32 phy, mask; + int ret; + + /* CL0s and CL1 are enabled and supported together */ + if (clx == TB_CL1) + mask = LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE; + else + /* For now we support only CL0s and CL1. Not CL2 */ + return -EOPNOTSUPP; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (enable) + phy |= mask; + else + phy &= ~mask; + + return tb_port_write(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +static int tb_port_clx_disable(struct tb_port *port, enum tb_clx clx) +{ + return __tb_port_clx_set(port, clx, false); +} + +static int tb_port_clx_enable(struct tb_port *port, enum tb_clx clx) +{ + return __tb_port_clx_set(port, clx, true); +} + +/** + * tb_port_is_clx_enabled() - Is given CL state enabled + * @port: USB4 port to check + * @clx_mask: Mask of CL states to check + * + * Returns true if any of the given CL states is enabled for @port. + */ +bool tb_port_is_clx_enabled(struct tb_port *port, unsigned int clx_mask) +{ + u32 val, mask = 0; + int ret; + + if (!tb_port_clx_supported(port, clx_mask)) + return false; + + if (clx_mask & TB_CL1) + mask |= LANE_ADP_CS_1_CL0S_ENABLE | LANE_ADP_CS_1_CL1_ENABLE; + if (clx_mask & TB_CL2) + mask |= LANE_ADP_CS_1_CL2_ENABLE; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return false; + + return !!(val & mask); +} + +static int tb_port_start_lane_initialization(struct tb_port *port) +{ + int ret; + + if (tb_switch_is_usb4(port->sw)) + return 0; + + ret = tb_lc_start_lane_initialization(port); + return ret == -EINVAL ? 
0 : ret; +} + +/* + * Returns true if the port had something (router, XDomain) connected + * before suspend. + */ +static bool tb_port_resume(struct tb_port *port) +{ + bool has_remote = tb_port_has_remote(port); + + if (port->usb4) { + usb4_port_device_resume(port->usb4); + } else if (!has_remote) { + /* + * For disconnected downstream lane adapters start lane + * initialization now so we detect future connects. + * + * For XDomain start the lane initialization now so the + * link gets re-established. + * + * This is only needed for non-USB4 ports. + */ + if (!tb_is_upstream_port(port) || port->xdomain) + tb_port_start_lane_initialization(port); + } + + return has_remote || port->xdomain; +} + +/** * tb_port_is_enabled() - Is the adapter port enabled * @port: Port to check */ @@ -1165,7 +1472,9 @@ int tb_dp_port_hpd_clear(struct tb_port *port) * @aux_tx: AUX TX Hop ID * @aux_rx: AUX RX Hop ID * - * Programs specified Hop IDs for DP IN/OUT port. + * Programs specified Hop IDs for DP IN/OUT port. Can be called for USB4 + * router DP adapters too but does not program the values as the fields + * are read-only. */ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, unsigned int aux_tx, unsigned int aux_rx) @@ -1173,6 +1482,9 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, u32 data[2]; int ret; + if (tb_switch_is_usb4(port->sw)) + return 0; + ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); if (ret) @@ -1271,30 +1583,65 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) } /** - * reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET + * tb_switch_reset() - reconfigure route, enable and send TB_CFG_PKG_RESET + * @sw: Switch to reset * * Return: Returns 0 on success or an error code on failure. */ -int tb_switch_reset(struct tb *tb, u64 route) +int tb_switch_reset(struct tb_switch *sw) { struct tb_cfg_result res; - struct tb_regs_switch_header header = { - header.route_hi = route >> 32, - header.route_lo = route, - header.enabled = true, - }; - tb_dbg(tb, "resetting switch at %llx\n", route); - res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route, - 0, 2, 2, 2); + + if (sw->generation > 1) + return 0; + + tb_sw_dbg(sw, "resetting switch\n"); + + res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, + TB_CFG_SWITCH, 2, 2); if (res.err) return res.err; - res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT); + res = tb_cfg_reset(sw->tb->ctl, tb_route(sw)); if (res.err > 0) return -EIO; return res.err; } /** + * tb_switch_wait_for_bit() - Wait for specified value of bits in offset + * @sw: Router to read the offset value from + * @offset: Offset in the router config space to read from + * @bit: Bit mask in the offset to wait for + * @value: Value of the bits to wait for + * @timeout_msec: Timeout in ms how long to wait + * + * Wait until the specified bits in the specified offset reach the specified + * value. + * Returns %0 in case of success, %-ETIMEDOUT if the @value was not reached + * within the given timeout or a negative errno in case of failure.
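A representative use of tb_switch_wait_for_bit() is the request/acknowledge handshake in tb_switch_pcie_bridge_write() later in this file; isolated into a sketch:

static int example_wait_req_ack(struct tb_switch *sw, u32 offset)
{
	/* Wait up to 100 ms for the REQ/ACK bit at @offset to clear to 0 */
	return tb_switch_wait_for_bit(sw, offset,
				      TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK,
				      0, 100);
}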
+ */ +int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, + u32 value, int timeout_msec) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + + do { + u32 val; + int ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if ((val & bit) == value) + return 0; + + usleep_range(50, 100); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +/* * tb_plug_events_active() - enable/disable plug events on a switch * * Also configures a sane plug_events_delay of 255ms. @@ -1306,7 +1653,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) u32 data; int res; - if (tb_switch_is_icm(sw)) + if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw)) return 0; sw->config.plug_events_delay = 0xff; @@ -1314,10 +1661,6 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) if (res) return res; - /* Plug events are always enabled in USB4 */ - if (tb_switch_is_usb4(sw)) - return 0; - res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); if (res) return res; @@ -1330,7 +1673,13 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) case PCI_DEVICE_ID_INTEL_PORT_RIDGE: break; default: - data |= 4; + /* + * Skip Alpine Ridge, it needs to have vendor + * specific USB hotplug event enabled for the + * internal xHCI to work. + */ + if (!tb_switch_is_alpine_ridge(sw)) + data |= TB_PLUG_EVENTS_USB_DISABLE; } } else { data = data | 0x7c; @@ -1345,20 +1694,55 @@ static ssize_t authorized_show(struct device *dev, { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%u\n", sw->authorized); + return sysfs_emit(buf, "%u\n", sw->authorized); +} + +static int disapprove_switch(struct device *dev, void *not_used) +{ + char *envp[] = { "AUTHORIZED=0", NULL }; + struct tb_switch *sw; + + sw = tb_to_switch(dev); + if (sw && sw->authorized) { + int ret; + + /* First children */ + ret = device_for_each_child_reverse(&sw->dev, NULL, disapprove_switch); + if (ret) + return ret; + + ret = tb_domain_disapprove_switch(sw->tb, sw); + if (ret) + return ret; + + sw->authorized = 0; + kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); + } + + return 0; } static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) { + char envp_string[13]; int ret = -EINVAL; + char *envp[] = { envp_string, NULL }; if (!mutex_trylock(&sw->tb->lock)) return restart_syscall(); - if (sw->authorized) + if (!!sw->authorized == !!val) goto unlock; switch (val) { + /* Disapprove switch */ + case 0: + if (tb_route(sw)) { + ret = disapprove_switch(&sw->dev, NULL); + goto unlock; + } + break; + /* Approve switch */ case 1: if (sw->key) @@ -1379,8 +1763,12 @@ static int tb_switch_set_authorized(struct tb_switch *sw, unsigned int val) if (!ret) { sw->authorized = val; - /* Notify status change to the userspace */ - kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); + /* + * Notify status change to the userspace, informing the new + * value of /sys/bus/thunderbolt/devices/.../authorized. 
+ */ + sprintf(envp_string, "AUTHORIZED=%u", sw->authorized); + kobject_uevent_env(&sw->dev.kobj, KOBJ_CHANGE, envp); } unlock: @@ -1416,7 +1804,7 @@ static ssize_t boot_show(struct device *dev, struct device_attribute *attr, { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%u\n", sw->boot); + return sysfs_emit(buf, "%u\n", sw->boot); } static DEVICE_ATTR_RO(boot); @@ -1425,7 +1813,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr, { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%#x\n", sw->device); + return sysfs_emit(buf, "%#x\n", sw->device); } static DEVICE_ATTR_RO(device); @@ -1434,7 +1822,7 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%s\n", sw->device_name ? sw->device_name : ""); + return sysfs_emit(buf, "%s\n", sw->device_name ?: ""); } static DEVICE_ATTR_RO(device_name); @@ -1443,7 +1831,7 @@ generation_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%u\n", sw->generation); + return sysfs_emit(buf, "%u\n", sw->generation); } static DEVICE_ATTR_RO(generation); @@ -1457,9 +1845,9 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr, return restart_syscall(); if (sw->key) - ret = sprintf(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); + ret = sysfs_emit(buf, "%*phN\n", TB_SWITCH_KEY_SIZE, sw->key); else - ret = sprintf(buf, "\n"); + ret = sysfs_emit(buf, "\n"); mutex_unlock(&sw->tb->lock); return ret; @@ -1504,7 +1892,7 @@ static ssize_t speed_show(struct device *dev, struct device_attribute *attr, { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed); + return sysfs_emit(buf, "%u.0 Gb/s\n", sw->link_speed); } /* @@ -1519,7 +1907,7 @@ static ssize_t lanes_show(struct device *dev, struct device_attribute *attr, { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%u\n", sw->link_width); + return sysfs_emit(buf, "%u\n", sw->link_width); } /* @@ -1536,15 +1924,14 @@ static ssize_t nvm_authenticate_show(struct device *dev, u32 status; nvm_get_auth_status(sw, &status); - return sprintf(buf, "%#x\n", status); + return sysfs_emit(buf, "%#x\n", status); } -static ssize_t nvm_authenticate_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t count) +static ssize_t nvm_authenticate_sysfs(struct device *dev, const char *buf, + bool disconnect) { struct tb_switch *sw = tb_to_switch(dev); - bool val; - int ret; + int val, ret; pm_runtime_get_sync(&sw->dev); @@ -1553,31 +1940,48 @@ static ssize_t nvm_authenticate_store(struct device *dev, goto exit_rpm; } + if (sw->no_nvm_upgrade) { + ret = -EOPNOTSUPP; + goto exit_unlock; + } + /* If NVMem devices are not yet added */ if (!sw->nvm) { ret = -EAGAIN; goto exit_unlock; } - ret = kstrtobool(buf, &val); + ret = kstrtoint(buf, 10, &val); if (ret) goto exit_unlock; /* Always clear the authentication status */ nvm_clear_auth_status(sw); - if (val) { - if (!sw->nvm->buf) { - ret = -EINVAL; - goto exit_unlock; + if (val > 0) { + if (val == AUTHENTICATE_ONLY) { + if (disconnect) + ret = -EINVAL; + else + ret = nvm_authenticate(sw, true); + } else { + if (!sw->nvm->flushed) { + if (!sw->nvm->buf) { + ret = -EINVAL; + goto exit_unlock; + } + + ret = nvm_validate_and_write(sw); + if (ret || val == WRITE_ONLY) + goto exit_unlock; + } + if (val == WRITE_AND_AUTHENTICATE) { + if (disconnect) + ret = 
tb_lc_force_power(sw); + else + ret = nvm_authenticate(sw, false); + } } - - ret = nvm_validate_and_write(sw); - if (ret) - goto exit_unlock; - - sw->nvm->authenticating = true; - ret = nvm_authenticate(sw); } exit_unlock: @@ -1586,12 +1990,35 @@ exit_rpm: pm_runtime_mark_last_busy(&sw->dev); pm_runtime_put_autosuspend(&sw->dev); + return ret; +} + +static ssize_t nvm_authenticate_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret = nvm_authenticate_sysfs(dev, buf, false); if (ret) return ret; return count; } static DEVICE_ATTR_RW(nvm_authenticate); +static ssize_t nvm_authenticate_on_disconnect_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return nvm_authenticate_show(dev, attr, buf); +} + +static ssize_t nvm_authenticate_on_disconnect_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + int ret; + + ret = nvm_authenticate_sysfs(dev, buf, true); + return ret ? ret : count; +} +static DEVICE_ATTR_RW(nvm_authenticate_on_disconnect); + static ssize_t nvm_version_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1606,7 +2033,7 @@ static ssize_t nvm_version_show(struct device *dev, else if (!sw->nvm) ret = -EAGAIN; else - ret = sprintf(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); + ret = sysfs_emit(buf, "%x.%x\n", sw->nvm->major, sw->nvm->minor); mutex_unlock(&sw->tb->lock); @@ -1619,7 +2046,7 @@ static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%#x\n", sw->vendor); + return sysfs_emit(buf, "%#x\n", sw->vendor); } static DEVICE_ATTR_RO(vendor); @@ -1628,7 +2055,7 @@ vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%s\n", sw->vendor_name ? sw->vendor_name : ""); + return sysfs_emit(buf, "%s\n", sw->vendor_name ?: ""); } static DEVICE_ATTR_RO(vendor_name); @@ -1637,7 +2064,7 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr, { struct tb_switch *sw = tb_to_switch(dev); - return sprintf(buf, "%pUb\n", sw->uuid); + return sysfs_emit(buf, "%pUb\n", sw->uuid); } static DEVICE_ATTR_RO(unique_id); @@ -1649,6 +2076,7 @@ static struct attribute *switch_attrs[] = { &dev_attr_generation.attr, &dev_attr_key.attr, &dev_attr_nvm_authenticate.attr, + &dev_attr_nvm_authenticate_on_disconnect.attr, &dev_attr_nvm_version.attr, &dev_attr_rx_speed.attr, &dev_attr_rx_lanes.attr, @@ -1663,10 +2091,14 @@ static struct attribute *switch_attrs[] = { static umode_t switch_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct tb_switch *sw = tb_to_switch(dev); - if (attr == &dev_attr_device.attr) { + if (attr == &dev_attr_authorized.attr) { + if (sw->tb->security_level == TB_SECURITY_NOPCIE || + sw->tb->security_level == TB_SECURITY_DPONLY) + return 0; + } else if (attr == &dev_attr_device.attr) { if (!sw->device) return 0; } else if (attr == &dev_attr_device_name.attr) { @@ -1703,12 +2135,16 @@ static umode_t switch_attr_is_visible(struct kobject *kobj, if (tb_route(sw)) return attr->mode; return 0; + } else if (attr == &dev_attr_nvm_authenticate_on_disconnect.attr) { + if (sw->quirks & QUIRK_FORCE_POWER_LINK_CONTROLLER) + return attr->mode; + return 0; } return sw->safe_mode ? 
0 : attr->mode; } -static struct attribute_group switch_group = { +static const struct attribute_group switch_group = { .is_visible = switch_attr_is_visible, .attrs = switch_attrs, }; @@ -1726,10 +2162,8 @@ static void tb_switch_release(struct device *dev) dma_port_free(sw->dma_port); tb_switch_for_each_port(sw, port) { - if (!port->disabled) { - ida_destroy(&port->in_hopids); - ida_destroy(&port->out_hopids); - } + ida_destroy(&port->in_hopids); + ida_destroy(&port->out_hopids); } kfree(sw->uuid); @@ -1741,6 +2175,39 @@ static void tb_switch_release(struct device *dev) kfree(sw); } +static int tb_switch_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + struct tb_switch *sw = tb_to_switch(dev); + const char *type; + + if (sw->config.thunderbolt_version == USB4_VERSION_1_0) { + if (add_uevent_var(env, "USB4_VERSION=1.0")) + return -ENOMEM; + } + + if (!tb_route(sw)) { + type = "host"; + } else { + const struct tb_port *port; + bool hub = false; + + /* Device is hub if it has any downstream ports */ + tb_switch_for_each_port(sw, port) { + if (!port->disabled && !tb_is_upstream_port(port) && + tb_port_is_null(port)) { + hub = true; + break; + } + } + + type = hub ? "hub" : "device"; + } + + if (add_uevent_var(env, "USB4_TYPE=%s", type)) + return -ENOMEM; + return 0; +} + /* * Currently only need to provide the callbacks. Everything else is handled * in the connection manager. @@ -1774,6 +2241,7 @@ static const struct dev_pm_ops tb_switch_pm_ops = { struct device_type tb_switch_type = { .name = "thunderbolt_device", .release = tb_switch_release, + .uevent = tb_switch_uevent, .pm = &tb_switch_pm_ops, }; @@ -1909,16 +2377,30 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, /* minimum setup for tb_find_cap and tb_drom_read to work */ sw->ports[i].sw = sw; sw->ports[i].port = i; + + /* Control port does not need HopID allocation */ + if (i) { + ida_init(&sw->ports[i].in_hopids); + ida_init(&sw->ports[i].out_hopids); + } } ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); if (ret > 0) sw->cap_plug_events = ret; + ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_TIME2); + if (ret > 0) + sw->cap_vsec_tmu = ret; + ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); if (ret > 0) sw->cap_lc = ret; + ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_CP_LP); + if (ret > 0) + sw->cap_lp = ret; + /* Root switch is always authorized */ if (!route) sw->authorized = true; @@ -1998,7 +2480,7 @@ int tb_switch_configure(struct tb_switch *sw) route = tb_route(sw); tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", - sw->config.enabled ? "restoring " : "initializing", route, + sw->config.enabled ? "restoring" : "initializing", route, tb_route_length(route), sw->config.upstream_port_number); sw->config.enabled = 1; @@ -2010,6 +2492,7 @@ int tb_switch_configure(struct tb_switch *sw) * additional capabilities. 
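With the per-port IDAs initialized in tb_switch_alloc() above, HopID management reduces to standard IDA calls. A hypothetical wrapper pair for illustration (the driver has its own equivalents; this is shown only to make the data structure concrete):

static int example_alloc_in_hopid(struct tb_port *port, int min, int max)
{
	/* Returns the lowest free HopID in [min, max] or a negative errno */
	return ida_alloc_range(&port->in_hopids, min, max, GFP_KERNEL);
}

static void example_free_in_hopid(struct tb_port *port, int hopid)
{
	ida_free(&port->in_hopids, hopid);
}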
*/ sw->config.cmuv = USB4_VERSION_1_0; + sw->config.plug_events_delay = 0xa; /* Enumerate the switch */ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, @@ -2018,10 +2501,6 @@ int tb_switch_configure(struct tb_switch *sw) return ret; ret = usb4_switch_setup(sw); - if (ret) - return ret; - - ret = usb4_switch_configure_link(sw); } else { if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) tb_sw_warn(sw, "unknown switch vendor id %#x\n", @@ -2035,10 +2514,6 @@ int tb_switch_configure(struct tb_switch *sw) /* Enumerate the switch */ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, ROUTER_CS_1, 3); - if (ret) - return ret; - - ret = tb_lc_configure_link(sw); } if (ret) return ret; @@ -2103,8 +2578,9 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (tb_route(sw)) return 0; - /* fallthrough */ + fallthrough; case 3: + case 4: ret = tb_switch_set_uuid(sw); if (ret) return ret; @@ -2120,6 +2596,22 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) break; } + if (sw->no_nvm_upgrade) + return 0; + + if (tb_switch_is_usb4(sw)) { + ret = usb4_switch_nvm_authenticate_status(sw, &status); + if (ret) + return ret; + + if (status) { + tb_sw_info(sw, "switch flash authentication failed\n"); + nvm_set_auth_status(sw, status); + } + + return 0; + } + /* Root switch DMA port requires running firmware */ if (!tb_route(sw) && !tb_switch_is_icm(sw)) return 0; @@ -2128,9 +2620,6 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) if (!sw->dma_port) return 0; - if (sw->no_nvm_upgrade) - return 0; - /* * If there is status already set then authentication failed * when the dma_port_flash_update_auth() returned. Power cycling @@ -2176,7 +2665,7 @@ static void tb_switch_default_link_ports(struct tb_switch *sw) { int i; - for (i = 1; i <= sw->config.max_port_number; i += 2) { + for (i = 1; i <= sw->config.max_port_number; i++) { struct tb_port *port = &sw->ports[i]; struct tb_port *subordinate; @@ -2287,6 +2776,14 @@ int tb_switch_lane_bonding_enable(struct tb_switch *sw) return ret; } + ret = tb_port_wait_for_link_width(down, 2, 100); + if (ret) { + tb_port_warn(down, "timeout enabling lane bonding\n"); + return ret; + } + + tb_port_update_credits(down); + tb_port_update_credits(up); tb_switch_update_link_attributes(sw); tb_sw_dbg(sw, "lane bonding enabled\n"); @@ -2317,11 +2814,114 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw) tb_port_lane_bonding_disable(up); tb_port_lane_bonding_disable(down); + /* + * It is fine if we get other errors as the router might have + * been unplugged. + */ + if (tb_port_wait_for_link_width(down, 1, 100) == -ETIMEDOUT) + tb_sw_warn(sw, "timeout disabling lane bonding\n"); + + tb_port_update_credits(down); + tb_port_update_credits(up); tb_switch_update_link_attributes(sw); + tb_sw_dbg(sw, "lane bonding disabled\n"); } /** + * tb_switch_configure_link() - Set link configured + * @sw: Switch whose link is configured + * + * Sets the link upstream from @sw configured (from both ends) so that + * it will not be disconnected when the domain exits sleep. Can be + * called for any switch. + * + * It is recommended that this is called after lane bonding is enabled. + * + * Returns %0 on success and negative errno in case of error. 
+ */ +int tb_switch_configure_link(struct tb_switch *sw) +{ + struct tb_port *up, *down; + int ret; + + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return 0; + + up = tb_upstream_port(sw); + if (tb_switch_is_usb4(up->sw)) + ret = usb4_port_configure(up); + else + ret = tb_lc_configure_port(up); + if (ret) + return ret; + + down = up->remote; + if (tb_switch_is_usb4(down->sw)) + return usb4_port_configure(down); + return tb_lc_configure_port(down); +} + +/** + * tb_switch_unconfigure_link() - Unconfigure link + * @sw: Switch whose link is unconfigured + * + * Sets the link unconfigured so the @sw will be disconnected if the + * domain exits sleep. + */ +void tb_switch_unconfigure_link(struct tb_switch *sw) +{ + struct tb_port *up, *down; + + if (sw->is_unplugged) + return; + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return; + + up = tb_upstream_port(sw); + if (tb_switch_is_usb4(up->sw)) + usb4_port_unconfigure(up); + else + tb_lc_unconfigure_port(up); + + down = up->remote; + if (tb_switch_is_usb4(down->sw)) + usb4_port_unconfigure(down); + else + tb_lc_unconfigure_port(down); +} + +static void tb_switch_credits_init(struct tb_switch *sw) +{ + if (tb_switch_is_icm(sw)) + return; + if (!tb_switch_is_usb4(sw)) + return; + if (usb4_switch_credits_init(sw)) + tb_sw_info(sw, "failed to determine preferred buffer allocation, using defaults\n"); +} + +static int tb_switch_port_hotplug_enable(struct tb_switch *sw) +{ + struct tb_port *port; + + if (tb_switch_is_icm(sw)) + return 0; + + tb_switch_for_each_port(sw, port) { + int res; + + if (!port->cap_usb4) + continue; + + res = usb4_port_hotplug_enable(port); + if (res) + return res; + } + return 0; +} + +/** * tb_switch_add() - Add a switch to the domain * @sw: Switch to add * @@ -2351,14 +2951,16 @@ int tb_switch_add(struct tb_switch *sw) } if (!sw->safe_mode) { + tb_switch_credits_init(sw); + /* read drom */ ret = tb_drom_read(sw); - if (ret) { - dev_err(&sw->dev, "reading DROM failed\n"); - return ret; - } + if (ret) + dev_warn(&sw->dev, "reading DROM failed: %d\n", ret); tb_sw_dbg(sw, "uid: %#llx\n", sw->uid); + tb_check_quirks(sw); + ret = tb_switch_set_uuid(sw); if (ret) { dev_err(&sw->dev, "failed to set UUID\n"); @@ -2388,6 +2990,10 @@ int tb_switch_add(struct tb_switch *sw) return ret; } + ret = tb_switch_port_hotplug_enable(sw); + if (ret) + return ret; + ret = device_add(&sw->dev); if (ret) { dev_err(&sw->dev, "failed to add device: %d\n", ret); @@ -2402,13 +3008,25 @@ int tb_switch_add(struct tb_switch *sw) sw->device_name); } + ret = usb4_switch_add_ports(sw); + if (ret) { + dev_err(&sw->dev, "failed to add USB4 ports\n"); + goto err_del; + } + ret = tb_switch_nvm_add(sw); if (ret) { dev_err(&sw->dev, "failed to add NVM devices\n"); - device_del(&sw->dev); - return ret; + goto err_ports; } + /* + * Thunderbolt routers do not generate wakeups themselves but + * they forward wakeups from tunneled protocols, so enable it + * here.
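The intended caller-side ordering for tb_switch_configure_link() above (bond the link first, then mark it configured) can be seen in tb_scan_port() in tb.c later in this patch; condensed:

static void example_bring_up_link(struct tb_switch *sw)
{
	/* Widen the link if both ends support bonding */
	tb_switch_lane_bonding_enable(sw);
	/* Mark both ends configured so sleep does not tear the link down */
	tb_switch_configure_link(sw);
}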
+ */ + device_init_wakeup(&sw->dev, true); + pm_runtime_set_active(&sw->dev); if (sw->rpm) { pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); @@ -2418,7 +3036,15 @@ int tb_switch_add(struct tb_switch *sw) pm_request_autosuspend(&sw->dev); } + tb_switch_debugfs_init(sw); return 0; + +err_ports: + usb4_switch_remove_ports(sw); +err_del: + device_del(&sw->dev); + + return ret; } /** @@ -2433,6 +3059,8 @@ void tb_switch_remove(struct tb_switch *sw) { struct tb_port *port; + tb_switch_debugfs_remove(sw); + if (sw->rpm) { pm_runtime_get_sync(&sw->dev); pm_runtime_disable(&sw->dev); @@ -2447,17 +3075,16 @@ void tb_switch_remove(struct tb_switch *sw) tb_xdomain_remove(port->xdomain); port->xdomain = NULL; } + + /* Remove any downstream retimers */ + tb_retimer_remove_all(port); } if (!sw->is_unplugged) tb_plug_events_active(sw, false); - if (tb_switch_is_usb4(sw)) - usb4_switch_unconfigure_link(sw); - else - tb_lc_unconfigure_link(sw); - tb_switch_nvm_remove(sw); + usb4_switch_remove_ports(sw); if (tb_route(sw)) dev_info(&sw->dev, "device disconnected\n"); @@ -2466,6 +3093,7 @@ void tb_switch_remove(struct tb_switch *sw) /** * tb_sw_set_unplugged() - set is_unplugged on switch and downstream switches + * @sw: Router to mark unplugged */ void tb_sw_set_unplugged(struct tb_switch *sw) { @@ -2488,6 +3116,18 @@ void tb_sw_set_unplugged(struct tb_switch *sw) } } +static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) +{ + if (flags) + tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); + else + tb_sw_dbg(sw, "disabling wakeup\n"); + + if (tb_switch_is_usb4(sw)) + return usb4_switch_set_wake(sw, flags); + return tb_lc_set_wake(sw, flags); +} + int tb_switch_resume(struct tb_switch *sw) { struct tb_port *port; @@ -2513,6 +3153,10 @@ int tb_switch_resume(struct tb_switch *sw) return err; } + /* We don't have any way to confirm this was the same device */ + if (!sw->uid) + return -ENODEV; + if (tb_switch_is_usb4(sw)) err = usb4_switch_read_uid(sw, &uid); else @@ -2533,9 +3177,19 @@ int tb_switch_resume(struct tb_switch *sw) if (err) return err; + /* Disable wakes */ + tb_switch_set_wake(sw, 0); + + err = tb_switch_tmu_init(sw); + if (err) + return err; + /* check for surviving downstream switches */ tb_switch_for_each_port(sw, port) { - if (!tb_port_has_remote(port) && !port->xdomain) + if (!tb_port_is_null(port)) + continue; + + if (!tb_port_resume(port)) continue; if (tb_wait_for_port(port, true) <= 0) { @@ -2545,7 +3199,7 @@ int tb_switch_resume(struct tb_switch *sw) tb_sw_set_unplugged(port->remote->sw); else if (port->xdomain) port->xdomain->is_unplugged = true; - } else if (tb_port_has_remote(port) || port->xdomain) { + } else { /* * Always unlock the port so the downstream * switch/domain is accessible. @@ -2562,20 +3216,55 @@ int tb_switch_resume(struct tb_switch *sw) return 0; } -void tb_switch_suspend(struct tb_switch *sw) +/** + * tb_switch_suspend() - Put a switch to sleep + * @sw: Switch to suspend + * @runtime: Is this runtime suspend or system sleep + * + * Suspends router and all its children. Enables wakes according to + * value of @runtime and then sets sleep bit for the router. If @sw is + * host router the domain is ready to go to sleep once this function + * returns. 
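tb_switch_suspend() below chooses wake sources differently for runtime suspend and system sleep. Its decision, condensed into a hypothetical helper:

static unsigned int example_wake_flags(struct tb_switch *sw, bool runtime)
{
	unsigned int flags = 0;

	if (runtime) {
		/* Runtime PM: any plug, unplug or protocol wake must resume us */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 |
			 TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
	} else if (device_may_wakeup(&sw->dev)) {
		/* System sleep: honor the userspace wakeup policy, no DP wake */
		flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
	}

	return flags;
}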
+ */ +void tb_switch_suspend(struct tb_switch *sw, bool runtime) { + unsigned int flags = 0; struct tb_port *port; int err; + tb_sw_dbg(sw, "suspending switch\n"); + + /* + * Actually only needed for Titan Ridge but for simplicity can be + * done for USB4 device too as CLx is re-enabled at resume. + * CL0s and CL1 are enabled and supported together. + */ + if (tb_switch_is_clx_enabled(sw, TB_CL1)) { + if (tb_switch_disable_clx(sw, TB_CL1)) + tb_sw_warn(sw, "failed to disable %s on upstream port\n", + tb_switch_clx_name(TB_CL1)); + } + err = tb_plug_events_active(sw, false); if (err) return; tb_switch_for_each_port(sw, port) { if (tb_port_has_remote(port)) - tb_switch_suspend(port->remote->sw); + tb_switch_suspend(port->remote->sw, runtime); } + if (runtime) { + /* Trigger wake when something is plugged in/out */ + flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; + flags |= TB_WAKE_ON_USB4; + flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP; + } else if (device_may_wakeup(&sw->dev)) { + flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; + } + + tb_switch_set_wake(sw, flags); + if (tb_switch_is_usb4(sw)) usb4_switch_set_sleep(sw); else @@ -2608,9 +3297,20 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) */ int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { + int ret; + if (tb_switch_is_usb4(sw)) - return usb4_switch_alloc_dp_resource(sw, in); - return tb_lc_dp_sink_alloc(sw, in); + ret = usb4_switch_alloc_dp_resource(sw, in); + else + ret = tb_lc_dp_sink_alloc(sw, in); + + if (ret) + tb_sw_warn(sw, "failed to allocate DP resource for port %d\n", + in->port); + else + tb_sw_dbg(sw, "allocated DP resource for port %d\n", in->port); + + return ret; } /** @@ -2633,6 +3333,8 @@ void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) if (ret) tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", in->port); + else + tb_sw_dbg(sw, "released DP resource for port %d\n", in->port); } struct tb_sw_lookup { @@ -2763,7 +3465,376 @@ struct tb_port *tb_switch_find_port(struct tb_switch *sw, return NULL; } -void tb_switch_exit(void) +static int tb_switch_pm_secondary_resolve(struct tb_switch *sw) +{ + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *up, *down; + int ret; + + if (!tb_route(sw)) + return 0; + + up = tb_upstream_port(sw); + down = tb_port_at(tb_route(sw), parent); + ret = tb_port_pm_secondary_enable(up); + if (ret) + return ret; + + return tb_port_pm_secondary_disable(down); +} + +static int __tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) +{ + struct tb_switch *parent = tb_switch_parent(sw); + bool up_clx_support, down_clx_support; + struct tb_port *up, *down; + int ret; + + if (!tb_switch_is_clx_supported(sw)) + return 0; + + /* + * Enable CLx for host router's downstream port as part of the + * downstream router enabling procedure. + */ + if (!tb_route(sw)) + return 0; + + /* Enable CLx only for first hop router (depth = 1) */ + if (tb_route(parent)) + return 0; + + ret = tb_switch_pm_secondary_resolve(sw); + if (ret) + return ret; + + up = tb_upstream_port(sw); + down = tb_port_at(tb_route(sw), parent); + + up_clx_support = tb_port_clx_supported(up, clx); + down_clx_support = tb_port_clx_supported(down, clx); + + tb_port_dbg(up, "%s %ssupported\n", tb_switch_clx_name(clx), + up_clx_support ? "" : "not "); + tb_port_dbg(down, "%s %ssupported\n", tb_switch_clx_name(clx), + down_clx_support ? 
"" : "not "); + + if (!up_clx_support || !down_clx_support) + return -EOPNOTSUPP; + + ret = tb_port_clx_enable(up, clx); + if (ret) + return ret; + + ret = tb_port_clx_enable(down, clx); + if (ret) { + tb_port_clx_disable(up, clx); + return ret; + } + + ret = tb_switch_mask_clx_objections(sw); + if (ret) { + tb_port_clx_disable(up, clx); + tb_port_clx_disable(down, clx); + return ret; + } + + sw->clx = clx; + + tb_port_dbg(up, "%s enabled\n", tb_switch_clx_name(clx)); + return 0; +} + +/** + * tb_switch_enable_clx() - Enable CLx on upstream port of specified router + * @sw: Router to enable CLx for + * @clx: The CLx state to enable + * + * Enable CLx state only for first hop router. That is the most common + * use-case, that is intended for better thermal management, and so helps + * to improve performance. CLx is enabled only if both sides of the link + * support CLx, and if both sides of the link are not configured as two + * single lane links and only if the link is not inter-domain link. The + * complete set of conditions is described in CM Guide 1.0 section 8.1. + * + * Return: Returns 0 on success or an error code on failure. + */ +int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx) +{ + struct tb_switch *root_sw = sw->tb->root_switch; + + if (!clx_enabled) + return 0; + + /* + * CLx is not enabled and validated on Intel USB4 platforms before + * Alder Lake. + */ + if (root_sw->generation < 4 || tb_switch_is_tiger_lake(root_sw)) + return 0; + + switch (clx) { + case TB_CL1: + /* CL0s and CL1 are enabled and supported together */ + return __tb_switch_enable_clx(sw, clx); + + default: + return -EOPNOTSUPP; + } +} + +static int __tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) { - ida_destroy(&nvm_ida); + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *up, *down; + int ret; + + if (!tb_switch_is_clx_supported(sw)) + return 0; + + /* + * Disable CLx for host router's downstream port as part of the + * downstream router enabling procedure. + */ + if (!tb_route(sw)) + return 0; + + /* Disable CLx only for first hop router (depth = 1) */ + if (tb_route(parent)) + return 0; + + up = tb_upstream_port(sw); + down = tb_port_at(tb_route(sw), parent); + ret = tb_port_clx_disable(up, clx); + if (ret) + return ret; + + ret = tb_port_clx_disable(down, clx); + if (ret) + return ret; + + sw->clx = TB_CLX_DISABLE; + + tb_port_dbg(up, "%s disabled\n", tb_switch_clx_name(clx)); + return 0; +} + +/** + * tb_switch_disable_clx() - Disable CLx on upstream port of specified router + * @sw: Router to disable CLx for + * @clx: The CLx state to disable + * + * Return: Returns 0 on success or an error code on failure. + */ +int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx) +{ + if (!clx_enabled) + return 0; + + switch (clx) { + case TB_CL1: + /* CL0s and CL1 are enabled and supported together */ + return __tb_switch_disable_clx(sw, clx); + + default: + return -EOPNOTSUPP; + } +} + +/** + * tb_switch_mask_clx_objections() - Mask CLx objections for a router + * @sw: Router to mask objections for + * + * Mask the objections coming from the second depth routers in order to + * stop these objections from interfering with the CLx states of the first + * depth link. 
+ */ +int tb_switch_mask_clx_objections(struct tb_switch *sw) +{ + int up_port = sw->config.upstream_port_number; + u32 offset, val[2], mask_obj, unmask_obj; + int ret, i; + + /* Only Titan Ridge of pre-USB4 devices support CLx states */ + if (!tb_switch_is_titan_ridge(sw)) + return 0; + + if (!tb_route(sw)) + return 0; + + /* + * In Titan Ridge there are only 2 dual-lane Thunderbolt ports: + * Port A consists of lane adapters 1,2 and + * Port B consists of lane adapters 3,4 + * If upstream port is A, (lanes are 1,2), we mask objections from + * port B (lanes 3,4) and unmask objections from Port A and vice-versa. + */ + if (up_port == 1) { + mask_obj = TB_LOW_PWR_C0_PORT_B_MASK; + unmask_obj = TB_LOW_PWR_C1_PORT_A_MASK; + offset = TB_LOW_PWR_C1_CL1; + } else { + mask_obj = TB_LOW_PWR_C1_PORT_A_MASK; + unmask_obj = TB_LOW_PWR_C0_PORT_B_MASK; + offset = TB_LOW_PWR_C3_CL1; + } + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lp + offset, ARRAY_SIZE(val)); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(val); i++) { + val[i] |= mask_obj; + val[i] &= ~unmask_obj; + } + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->cap_lp + offset, ARRAY_SIZE(val)); +} + +/* + * Can be used for read/write a specified PCIe bridge for any Thunderbolt 3 + * device. For now used only for Titan Ridge. + */ +static int tb_switch_pcie_bridge_write(struct tb_switch *sw, unsigned int bridge, + unsigned int pcie_offset, u32 value) +{ + u32 offset, command, val; + int ret; + + if (sw->generation != 3) + return -EOPNOTSUPP; + + offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_WR_DATA; + ret = tb_sw_write(sw, &value, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + command = pcie_offset & TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK; + command |= BIT(bridge + TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT); + command |= TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK; + command |= TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL + << TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT; + command |= TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK; + + offset = sw->cap_plug_events + TB_PLUG_EVENTS_PCIE_CMD; + + ret = tb_sw_write(sw, &command, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + ret = tb_switch_wait_for_bit(sw, offset, + TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK, 0, 100); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if (val & TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK) + return -ETIMEDOUT; + + return 0; +} + +/** + * tb_switch_pcie_l1_enable() - Enable PCIe link to enter L1 state + * @sw: Router to enable PCIe L1 + * + * For Titan Ridge switch to enter CLx state, its PCIe bridges shall enable + * entry to PCIe L1 state. Shall be called after the upstream PCIe tunnel + * was configured. Due to Intel platforms limitation, shall be called only + * for first hop switch. + */ +int tb_switch_pcie_l1_enable(struct tb_switch *sw) +{ + struct tb_switch *parent = tb_switch_parent(sw); + int ret; + + if (!tb_route(sw)) + return 0; + + if (!tb_switch_is_titan_ridge(sw)) + return 0; + + /* Enable PCIe L1 enable only for first hop router (depth = 1) */ + if (tb_route(parent)) + return 0; + + /* Write to downstream PCIe bridge #5 aka Dn4 */ + ret = tb_switch_pcie_bridge_write(sw, 5, 0x143, 0x0c7806b1); + if (ret) + return ret; + + /* Write to Upstream PCIe bridge #0 aka Up0 */ + return tb_switch_pcie_bridge_write(sw, 0, 0x143, 0x0c5806b1); +} + +/** + * tb_switch_xhci_connect() - Connect internal xHCI + * @sw: Router whose xHCI to connect + * + * Can be called to any router. 
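Two usage notes for the CLx machinery above, both condensed from connection manager code later in this patch and assuming the driver's internal declarations. First, callers treat missing CLx support as non-fatal, since -EOPNOTSUPP just means the topology does not qualify:

static void example_enable_clx(struct tb_switch *sw)
{
	int ret;

	/* CL0s and CL1 come as a pair, so TB_CL1 covers both */
	ret = tb_switch_enable_clx(sw, TB_CL1);
	if (ret && ret != -EOPNOTSUPP)
		tb_sw_warn(sw, "failed to enable %s on upstream port\n",
			   tb_switch_clx_name(TB_CL1));
}

Second, on Titan Ridge the PCIe links must be allowed into L1 once the PCIe tunnel is up before CLx entry can actually happen; a hypothetical post-tunnel sequence:

static int example_after_pcie_tunnel(struct tb_switch *sw)
{
	int ret;

	/* Let the tunneled PCIe link enter L1 so CLx can be entered too */
	ret = tb_switch_pcie_l1_enable(sw);
	if (ret)
		return ret;

	/* Bring up the integrated xHCI behind the Type-C ports */
	return tb_switch_xhci_connect(sw);
}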
For Alpine Ridge and Titan Ridge + * performs special flows that bring the xHCI functional for any device + * connected to the type-C port. Call only after PCIe tunnel has been + * established. The function only does the connect if not done already + * so can be called several times for the same router. + */ +int tb_switch_xhci_connect(struct tb_switch *sw) +{ + struct tb_port *port1, *port3; + int ret; + + if (sw->generation != 3) + return 0; + + port1 = &sw->ports[1]; + port3 = &sw->ports[3]; + + if (tb_switch_is_alpine_ridge(sw)) { + bool usb_port1, usb_port3, xhci_port1, xhci_port3; + + usb_port1 = tb_lc_is_usb_plugged(port1); + usb_port3 = tb_lc_is_usb_plugged(port3); + xhci_port1 = tb_lc_is_xhci_connected(port1); + xhci_port3 = tb_lc_is_xhci_connected(port3); + + /* Figure out correct USB port to connect */ + if (usb_port1 && !xhci_port1) { + ret = tb_lc_xhci_connect(port1); + if (ret) + return ret; + } + if (usb_port3 && !xhci_port3) + return tb_lc_xhci_connect(port3); + } else if (tb_switch_is_titan_ridge(sw)) { + ret = tb_lc_xhci_connect(port1); + if (ret) + return ret; + return tb_lc_xhci_connect(port3); + } + + return 0; +} + +/** + * tb_switch_xhci_disconnect() - Disconnect internal xHCI + * @sw: Router whose xHCI to disconnect + * + * The opposite of tb_switch_xhci_connect(). Disconnects xHCI on both + * ports. + */ +void tb_switch_xhci_disconnect(struct tb_switch *sw) +{ + if (sw->generation == 3) { + struct tb_port *port1 = &sw->ports[1]; + struct tb_port *port3 = &sw->ports[3]; + + tb_lc_xhci_disconnect(port1); + tb_port_dbg(port1, "disconnected xHCI\n"); + tb_lc_xhci_disconnect(port3); + tb_port_dbg(port3, "disconnected xHCI\n"); + } } diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 107cd232f486..462845804427 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -9,11 +9,15 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> +#include <linux/platform_data/x86/apple.h> #include "tb.h" #include "tb_regs.h" #include "tunnel.h" +#define TB_TIMEOUT 100 /* ms */ + /** * struct tb_cm - Simple Thunderbolt connection manager * @tunnel_list: List of active tunnels @@ -22,13 +26,21 @@ * events and exit if this is not set (it needs to * acquire the lock one more time). Used to drain wq * after cfg has been paused. + * @remove_work: Work used to remove any unplugged routers after + * runtime resume */ struct tb_cm { struct list_head tunnel_list; struct list_head dp_resources; bool hotplug_active; + struct delayed_work remove_work; }; +static inline struct tb *tcm_to_tb(struct tb_cm *tcm) +{ + return ((void *)tcm - sizeof(struct tb)); +} + struct tb_hotplug_event { struct work_struct work; struct tb *tb; @@ -93,10 +105,37 @@ static void tb_remove_dp_resources(struct tb_switch *sw) } } -static void tb_discover_tunnels(struct tb_switch *sw) +static void tb_discover_dp_resource(struct tb *tb, struct tb_port *port) { - struct tb *tb = sw->tb; struct tb_cm *tcm = tb_priv(tb); + struct tb_port *p; + + list_for_each_entry(p, &tcm->dp_resources, list) { + if (p == port) + return; + } + + tb_port_dbg(port, "DP %s resource available discovered\n", + tb_port_is_dpin(port) ? 
"IN" : "OUT"); + list_add_tail(&port->list, &tcm->dp_resources); +} + +static void tb_discover_dp_resources(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (tb_tunnel_is_dp(tunnel)) + tb_discover_dp_resource(tb, tunnel->dst_port); + } +} + +static void tb_switch_discover_tunnels(struct tb_switch *sw, + struct list_head *list, + bool alloc_hopids) +{ + struct tb *tb = sw->tb; struct tb_port *port; tb_switch_for_each_port(sw, port) { @@ -104,24 +143,48 @@ static void tb_discover_tunnels(struct tb_switch *sw) switch (port->config.type) { case TB_TYPE_DP_HDMI_IN: - tunnel = tb_tunnel_discover_dp(tb, port); + tunnel = tb_tunnel_discover_dp(tb, port, alloc_hopids); + /* + * In case of DP tunnel exists, change host router's + * 1st children TMU mode to HiFi for CL0s to work. + */ + if (tunnel) + tb_switch_enable_tmu_1st_child(tb->root_switch, + TB_SWITCH_TMU_RATE_HIFI); break; case TB_TYPE_PCIE_DOWN: - tunnel = tb_tunnel_discover_pci(tb, port); + tunnel = tb_tunnel_discover_pci(tb, port, alloc_hopids); break; case TB_TYPE_USB3_DOWN: - tunnel = tb_tunnel_discover_usb3(tb, port); + tunnel = tb_tunnel_discover_usb3(tb, port, alloc_hopids); break; default: break; } - if (!tunnel) - continue; + if (tunnel) + list_add_tail(&tunnel->list, list); + } + + tb_switch_for_each_port(sw, port) { + if (tb_port_has_remote(port)) { + tb_switch_discover_tunnels(port->remote->sw, list, + alloc_hopids); + } + } +} + +static void tb_discover_tunnels(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + tb_switch_discover_tunnels(tb->root_switch, &tcm->tunnel_list, true); + + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { if (tb_tunnel_is_pci(tunnel)) { struct tb_switch *parent = tunnel->dst_port->sw; @@ -129,15 +192,29 @@ static void tb_discover_tunnels(struct tb_switch *sw) parent->boot = true; parent = tb_switch_parent(parent); } + } else if (tb_tunnel_is_dp(tunnel)) { + /* Keep the domain from powering down */ + pm_runtime_get_sync(&tunnel->src_port->sw->dev); + pm_runtime_get_sync(&tunnel->dst_port->sw->dev); } - - list_add_tail(&tunnel->list, &tcm->tunnel_list); } +} - tb_switch_for_each_port(sw, port) { - if (tb_port_has_remote(port)) - tb_discover_tunnels(port->remote->sw); - } +static int tb_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd) +{ + if (tb_switch_is_usb4(port->sw)) + return usb4_port_configure_xdomain(port, xd); + return tb_lc_configure_xdomain(port); +} + +static void tb_port_unconfigure_xdomain(struct tb_port *port) +{ + if (tb_switch_is_usb4(port->sw)) + usb4_port_unconfigure_xdomain(port); + else + tb_lc_unconfigure_xdomain(port); + + tb_port_enable(port->dual_link_port); } static void tb_scan_xdomain(struct tb_port *port) @@ -147,6 +224,9 @@ static void tb_scan_xdomain(struct tb_port *port) struct tb_xdomain *xd; u64 route; + if (!tb_is_xdomain_enabled()) + return; + route = tb_downstream_route(port); xd = tb_xdomain_find_by_route(tb, route); if (xd) { @@ -158,6 +238,7 @@ static void tb_scan_xdomain(struct tb_port *port) NULL); if (xd) { tb_port_at(route, sw)->xdomain = xd; + tb_port_configure_xdomain(port, xd); tb_xdomain_add(xd); } } @@ -167,7 +248,7 @@ static int tb_enable_tmu(struct tb_switch *sw) int ret; /* If it is already enabled in correct mode, don't touch it */ - if (tb_switch_tmu_is_enabled(sw)) + if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request)) return 0; ret = tb_switch_tmu_disable(sw); @@ -206,35 +287,213 @@ 
static struct tb_port *tb_find_unused_port(struct tb_switch *sw, } static struct tb_port *tb_find_usb3_down(struct tb_switch *sw, - const struct tb_port *port) + const struct tb_port *port) { struct tb_port *down; down = usb4_switch_map_usb3_down(sw, port); - if (down) { - if (WARN_ON(!tb_port_is_usb3_down(down))) - goto out; - if (WARN_ON(tb_usb3_port_is_enabled(down))) - goto out; - + if (down && !tb_usb3_port_is_enabled(down)) return down; + return NULL; +} + +static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, + struct tb_port *src_port, + struct tb_port *dst_port) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (tunnel->type == type && + ((src_port && src_port == tunnel->src_port) || + (dst_port && dst_port == tunnel->dst_port))) { + return tunnel; + } } -out: - return tb_find_unused_port(sw, TB_TYPE_USB3_DOWN); + return NULL; +} + +static struct tb_tunnel *tb_find_first_usb3_tunnel(struct tb *tb, + struct tb_port *src_port, + struct tb_port *dst_port) +{ + struct tb_port *port, *usb3_down; + struct tb_switch *sw; + + /* Pick the router that is deepest in the topology */ + if (dst_port->sw->config.depth > src_port->sw->config.depth) + sw = dst_port->sw; + else + sw = src_port->sw; + + /* Can't be the host router */ + if (sw == tb->root_switch) + return NULL; + + /* Find the downstream USB4 port that leads to this router */ + port = tb_port_at(tb_route(sw), tb->root_switch); + /* Find the corresponding host router USB3 downstream port */ + usb3_down = usb4_switch_map_usb3_down(tb->root_switch, port); + if (!usb3_down) + return NULL; + + return tb_find_tunnel(tb, TB_TUNNEL_USB3, usb3_down, NULL); +} + +static int tb_available_bandwidth(struct tb *tb, struct tb_port *src_port, + struct tb_port *dst_port, int *available_up, int *available_down) +{ + int usb3_consumed_up, usb3_consumed_down, ret; + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + struct tb_port *port; + + tb_port_dbg(dst_port, "calculating available bandwidth\n"); + + tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); + if (tunnel) { + ret = tb_tunnel_consumed_bandwidth(tunnel, &usb3_consumed_up, + &usb3_consumed_down); + if (ret) + return ret; + } else { + usb3_consumed_up = 0; + usb3_consumed_down = 0; + } + + *available_up = *available_down = 40000; + + /* Find the minimum available bandwidth over all links */ + tb_for_each_port_on_path(src_port, dst_port, port) { + int link_speed, link_width, up_bw, down_bw; + + if (!tb_port_is_null(port)) + continue; + + if (tb_is_upstream_port(port)) { + link_speed = port->sw->link_speed; + } else { + link_speed = tb_port_get_link_speed(port); + if (link_speed < 0) + return link_speed; + } + + link_width = port->bonded ? 2 : 1; + + up_bw = link_speed * link_width * 1000; /* Mb/s */ + /* Leave 10% guard band */ + up_bw -= up_bw / 10; + down_bw = up_bw; + + tb_port_dbg(port, "link total bandwidth %d Mb/s\n", up_bw); + + /* + * Find all DP tunnels that cross the port and reduce + * their consumed bandwidth from the available. 
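A worked instance of the per-link arithmetic above: a bonded 20 Gb/s link yields 40000 Mb/s raw, and the 10% guard band leaves 36000 Mb/s for tunneled traffic:

static int example_link_bandwidth(void)
{
	int link_speed = 20;	/* Gb/s per lane (Thunderbolt 3 / USB4 Gen 3) */
	int link_width = 2;	/* bonded link */
	int up_bw = link_speed * link_width * 1000;	/* 40000 Mb/s */

	/* Leave 10% guard band: 40000 - 4000 = 36000 Mb/s */
	up_bw -= up_bw / 10;
	return up_bw;
}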
+ */ + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + int dp_consumed_up, dp_consumed_down; + + if (!tb_tunnel_is_dp(tunnel)) + continue; + + if (!tb_tunnel_port_on_path(tunnel, port)) + continue; + + ret = tb_tunnel_consumed_bandwidth(tunnel, + &dp_consumed_up, + &dp_consumed_down); + if (ret) + return ret; + + up_bw -= dp_consumed_up; + down_bw -= dp_consumed_down; + } + + /* + * If USB3 is tunneled from the host router down to the + * branch leading to port we need to take USB3 consumed + * bandwidth into account regardless whether it actually + * crosses the port. + */ + up_bw -= usb3_consumed_up; + down_bw -= usb3_consumed_down; + + if (up_bw < *available_up) + *available_up = up_bw; + if (down_bw < *available_down) + *available_down = down_bw; + } + + if (*available_up < 0) + *available_up = 0; + if (*available_down < 0) + *available_down = 0; + + return 0; +} + +static int tb_release_unused_usb3_bandwidth(struct tb *tb, + struct tb_port *src_port, + struct tb_port *dst_port) +{ + struct tb_tunnel *tunnel; + + tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); + return tunnel ? tb_tunnel_release_unused_bandwidth(tunnel) : 0; +} + +static void tb_reclaim_usb3_bandwidth(struct tb *tb, struct tb_port *src_port, + struct tb_port *dst_port) +{ + int ret, available_up, available_down; + struct tb_tunnel *tunnel; + + tunnel = tb_find_first_usb3_tunnel(tb, src_port, dst_port); + if (!tunnel) + return; + + tb_dbg(tb, "reclaiming unused bandwidth for USB3\n"); + + /* + * Calculate available bandwidth for the first hop USB3 tunnel. + * That determines the whole USB3 bandwidth for this branch. + */ + ret = tb_available_bandwidth(tb, tunnel->src_port, tunnel->dst_port, + &available_up, &available_down); + if (ret) { + tb_warn(tb, "failed to calculate available bandwidth\n"); + return; + } + + tb_dbg(tb, "available bandwidth for USB3 %d/%d Mb/s\n", + available_up, available_down); + + tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up, &available_down); } static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) { struct tb_switch *parent = tb_switch_parent(sw); + int ret, available_up, available_down; struct tb_port *up, *down, *port; struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; + if (!tb_acpi_may_tunnel_usb3()) { + tb_dbg(tb, "USB3 tunneling disabled, not creating tunnel\n"); + return 0; + } + up = tb_switch_find_port(sw, TB_TYPE_USB3_UP); if (!up) return 0; + if (!sw->link_usb4) + return 0; + /* * Look up available down port. Since we are chaining it should * be found right above this switch. 
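tb_tunnel_usb3() below brackets the bandwidth query with a release/reclaim pair so the new tunnel can be sized against everything that is not hard-committed. A condensed sketch of that protocol (error unwinding omitted; the real function also skips the pair when the tunnel starts at the host router):

static int example_size_usb3(struct tb *tb, struct tb_port *down,
			     struct tb_port *up)
{
	int available_up, available_down, ret;
	struct tb_tunnel *tunnel;

	/* Temporarily return all spare USB3 bandwidth on this branch */
	ret = tb_release_unused_usb3_bandwidth(tb, down, up);
	if (ret)
		return ret;

	ret = tb_available_bandwidth(tb, down, up, &available_up,
				     &available_down);
	if (ret)
		return ret;

	tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up,
				      available_down);
	if (!tunnel)
		return -ENOMEM;

	ret = tb_tunnel_activate(tunnel);

	/* Hand whatever the new tunnel did not take back to USB3 */
	tb_reclaim_usb3_bandwidth(tb, down, up);
	return ret;
}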
@@ -254,21 +513,48 @@ static int tb_tunnel_usb3(struct tb *tb, struct tb_switch *sw) parent_up = tb_switch_find_port(parent, TB_TYPE_USB3_UP); if (!parent_up || !tb_port_is_enabled(parent_up)) return 0; + + /* Make all unused bandwidth available for the new tunnel */ + ret = tb_release_unused_usb3_bandwidth(tb, down, up); + if (ret) + return ret; } - tunnel = tb_tunnel_alloc_usb3(tb, up, down); - if (!tunnel) - return -ENOMEM; + ret = tb_available_bandwidth(tb, down, up, &available_up, + &available_down); + if (ret) + goto err_reclaim; + + tb_port_dbg(up, "available bandwidth for new USB3 tunnel %d/%d Mb/s\n", + available_up, available_down); + + tunnel = tb_tunnel_alloc_usb3(tb, up, down, available_up, + available_down); + if (!tunnel) { + ret = -ENOMEM; + goto err_reclaim; + } if (tb_tunnel_activate(tunnel)) { tb_port_info(up, "USB3 tunnel activation failed, aborting\n"); - tb_tunnel_free(tunnel); - return -EIO; + ret = -EIO; + goto err_free; } list_add_tail(&tunnel->list, &tcm->tunnel_list); + if (tb_route(parent)) + tb_reclaim_usb3_bandwidth(tb, down, up); + return 0; + +err_free: + tb_tunnel_free(tunnel); +err_reclaim: + if (tb_route(parent)) + tb_reclaim_usb3_bandwidth(tb, down, up); + + return ret; } static int tb_create_usb3_tunnels(struct tb_switch *sw) @@ -276,6 +562,9 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw) struct tb_port *port; int ret; + if (!tb_acpi_may_tunnel_usb3()) + return 0; + if (tb_route(sw)) { ret = tb_tunnel_usb3(sw->tb, sw); if (ret) @@ -295,18 +584,23 @@ static int tb_create_usb3_tunnels(struct tb_switch *sw) static void tb_scan_port(struct tb_port *port); -/** +/* * tb_scan_switch() - scan for and initialize downstream switches */ static void tb_scan_switch(struct tb_switch *sw) { struct tb_port *port; + pm_runtime_get_sync(&sw->dev); + tb_switch_for_each_port(sw, port) tb_scan_port(port); + + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); } -/** +/* * tb_scan_port() - check for and initialize switches below port */ static void tb_scan_port(struct tb_port *port) @@ -314,6 +608,7 @@ static void tb_scan_port(struct tb_port *port) struct tb_cm *tcm = tb_priv(port->sw->tb); struct tb_port *upstream_port; struct tb_switch *sw; + int ret; if (tb_is_upstream_port(port)) return; @@ -339,6 +634,9 @@ static void tb_scan_port(struct tb_port *port) tb_port_dbg(port, "port already has a remote\n"); return; } + + tb_retimer_scan(port, true); + sw = tb_switch_alloc(port->sw->tb, &port->sw->dev, tb_downstream_route(port)); if (IS_ERR(sw)) { @@ -363,6 +661,7 @@ static void tb_scan_port(struct tb_port *port) */ if (port->xdomain) { tb_xdomain_remove(port->xdomain); + tb_port_unconfigure_xdomain(port); port->xdomain = NULL; } @@ -374,6 +673,12 @@ static void tb_scan_port(struct tb_port *port) if (!tcm->hotplug_active) dev_set_uevent_suppress(&sw->dev, true); + /* + * At the moment Thunderbolt 2 and beyond (devices with LC) we + * can support runtime PM. + */ + sw->rpm = sw->generation > 1; + if (tb_switch_add(sw)) { tb_switch_put(sw); return; @@ -389,12 +694,34 @@ static void tb_scan_port(struct tb_port *port) } /* Enable lane bonding if supported */ - if (tb_switch_lane_bonding_enable(sw)) - tb_sw_warn(sw, "failed to enable lane bonding\n"); + tb_switch_lane_bonding_enable(sw); + /* Set the link configured */ + tb_switch_configure_link(sw); + /* + * CL0s and CL1 are enabled and supported together. + * Silently ignore CLx enabling in case CLx is not supported. 
+ */ + ret = tb_switch_enable_clx(sw, TB_CL1); + if (ret && ret != -EOPNOTSUPP) + tb_sw_warn(sw, "failed to enable %s on upstream port\n", + tb_switch_clx_name(TB_CL1)); + + if (tb_switch_is_clx_enabled(sw, TB_CL1)) + /* + * To support highest CLx state, we set router's TMU to + * Normal-Uni mode. + */ + tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true); + else + /* If CLx disabled, configure router's TMU to HiFi-Bidir mode*/ + tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false); if (tb_enable_tmu(sw)) tb_sw_warn(sw, "failed to enable TMU\n"); + /* Scan upstream retimers */ + tb_retimer_scan(upstream_port, true); + /* * Create USB 3.x tunnels only when the switch is plugged to the * domain. This is because we scan the domain also during discovery @@ -404,49 +731,55 @@ static void tb_scan_port(struct tb_port *port) if (tcm->hotplug_active && tb_tunnel_usb3(sw->tb, sw)) tb_sw_warn(sw, "USB3 tunnel creation failed\n"); + tb_add_dp_resources(sw); tb_scan_switch(sw); } -static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, - struct tb_port *src_port, - struct tb_port *dst_port) -{ - struct tb_cm *tcm = tb_priv(tb); - struct tb_tunnel *tunnel; - - list_for_each_entry(tunnel, &tcm->tunnel_list, list) { - if (tunnel->type == type && - ((src_port && src_port == tunnel->src_port) || - (dst_port && dst_port == tunnel->dst_port))) { - return tunnel; - } - } - - return NULL; -} - static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) { + struct tb_port *src_port, *dst_port; + struct tb *tb; + if (!tunnel) return; tb_tunnel_deactivate(tunnel); list_del(&tunnel->list); - /* - * In case of DP tunnel make sure the DP IN resource is deallocated - * properly. - */ - if (tb_tunnel_is_dp(tunnel)) { - struct tb_port *in = tunnel->src_port; + tb = tunnel->tb; + src_port = tunnel->src_port; + dst_port = tunnel->dst_port; - tb_switch_dealloc_dp_resource(in->sw, in); + switch (tunnel->type) { + case TB_TUNNEL_DP: + /* + * In case of DP tunnel make sure the DP IN resource is + * deallocated properly. + */ + tb_switch_dealloc_dp_resource(src_port->sw, src_port); + /* Now we can allow the domain to runtime suspend again */ + pm_runtime_mark_last_busy(&dst_port->sw->dev); + pm_runtime_put_autosuspend(&dst_port->sw->dev); + pm_runtime_mark_last_busy(&src_port->sw->dev); + pm_runtime_put_autosuspend(&src_port->sw->dev); + fallthrough; + + case TB_TUNNEL_USB3: + tb_reclaim_usb3_bandwidth(tb, src_port, dst_port); + break; + + default: + /* + * PCIe and DMA tunnels do not consume guaranteed + * bandwidth. 
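tb_find_tunnel() (moved earlier in the file by this patch) matches on tunnel type plus either endpoint, so tearing a tunnel down by endpoint is a two-liner. Hypothetical usage, relying on tb_deactivate_and_free_tunnel() above being NULL-safe:

static void example_teardown_dp(struct tb *tb, struct tb_port *in)
{
	struct tb_tunnel *tunnel;

	/* Match by type and source; NULL destination acts as a wildcard */
	tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, NULL);
	tb_deactivate_and_free_tunnel(tunnel);
}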
+ */ + break; } tb_tunnel_free(tunnel); } -/** +/* * tb_free_invalid_tunnels() - destroy tunnels of devices that have gone away */ static void tb_free_invalid_tunnels(struct tb *tb) @@ -461,7 +794,7 @@ static void tb_free_invalid_tunnels(struct tb *tb) } } -/** +/* * tb_free_unplugged_children() - traverse hierarchy and free unplugged switches */ static void tb_free_unplugged_children(struct tb_switch *sw) @@ -473,7 +806,9 @@ static void tb_free_unplugged_children(struct tb_switch *sw) continue; if (port->remote->sw->is_unplugged) { + tb_retimer_remove_all(port); tb_remove_dp_resources(port->remote->sw); + tb_switch_unconfigure_link(port->remote->sw); tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; @@ -524,7 +859,7 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, if (down) { if (WARN_ON(!tb_port_is_pcie_down(down))) goto out; - if (WARN_ON(tb_pci_port_is_enabled(down))) + if (tb_pci_port_is_enabled(down)) goto out; return down; @@ -534,51 +869,54 @@ out: return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); } -static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in, - struct tb_port *out) +static struct tb_port *tb_find_dp_out(struct tb *tb, struct tb_port *in) { - struct tb_switch *sw = out->sw; - struct tb_tunnel *tunnel; - int bw, available_bw = 40000; + struct tb_port *host_port, *port; + struct tb_cm *tcm = tb_priv(tb); - while (sw && sw != in->sw) { - bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */ - /* Leave 10% guard band */ - bw -= bw / 10; + host_port = tb_route(in->sw) ? + tb_port_at(tb_route(in->sw), tb->root_switch) : NULL; + + list_for_each_entry(port, &tcm->dp_resources, list) { + if (!tb_port_is_dpout(port)) + continue; + + if (tb_port_is_enabled(port)) { + tb_port_dbg(port, "in use\n"); + continue; + } + + tb_port_dbg(port, "DP OUT available\n"); /* - * Check for any active DP tunnels that go through this - * switch and reduce their consumed bandwidth from - * available. + * Keep the DP tunnel under the topology starting from + * the same host router downstream port. 
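
/*
 * Editor's note: tb_find_dp_out() above pins DP IN and DP OUT under the
 * same host-router downstream port by comparing tb_port_at(tb_route(sw),
 * tb->root_switch) on both sides. Route strings pack one port number per
 * byte with depth 0 in the low byte (see tb_downstream_route() later in
 * tb.h), so for non-root routes the comparison reduces to this
 * standalone sketch with hypothetical helper names:
 */

#include <stdint.h>
#include <stdbool.h>

static inline unsigned int route_first_hop(uint64_t route)
{
	return route & 0xff;		/* host-router port is the low byte */
}

static bool same_host_downstream_port(uint64_t route_a, uint64_t route_b)
{
	return route_first_hop(route_a) == route_first_hop(route_b);
}
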
*/ - list_for_each_entry(tunnel, &tcm->tunnel_list, list) { - int consumed_bw; + if (host_port && tb_route(port->sw)) { + struct tb_port *p; - if (!tb_tunnel_switch_on_path(tunnel, sw)) + p = tb_port_at(tb_route(port->sw), tb->root_switch); + if (p != host_port) continue; - - consumed_bw = tb_tunnel_consumed_bandwidth(tunnel); - if (consumed_bw < 0) - return consumed_bw; - - bw -= consumed_bw; } - if (bw < available_bw) - available_bw = bw; - - sw = tb_switch_parent(sw); + return port; } - return available_bw; + return NULL; } static void tb_tunnel_dp(struct tb *tb) { + int available_up, available_down, ret, link_nr; struct tb_cm *tcm = tb_priv(tb); struct tb_port *port, *in, *out; struct tb_tunnel *tunnel; - int available_bw; + + if (!tb_acpi_may_tunnel_dp()) { + tb_dbg(tb, "DP tunneling disabled, not creating tunnel\n"); + return; + } /* * Find pair of inactive DP IN and DP OUT adapters and then @@ -589,17 +927,21 @@ static void tb_tunnel_dp(struct tb *tb) in = NULL; out = NULL; list_for_each_entry(port, &tcm->dp_resources, list) { + if (!tb_port_is_dpin(port)) + continue; + if (tb_port_is_enabled(port)) { tb_port_dbg(port, "in use\n"); continue; } - tb_port_dbg(port, "available\n"); + tb_port_dbg(port, "DP IN available\n"); - if (!in && tb_port_is_dpin(port)) + out = tb_find_dp_out(tb, port); + if (out) { in = port; - else if (!out && tb_port_is_dpout(port)) - out = port; + break; + } } if (!in) { @@ -611,38 +953,84 @@ static void tb_tunnel_dp(struct tb *tb) return; } + /* + * This is only applicable to links that are not bonded (so + * when Thunderbolt 1 hardware is involved somewhere in the + * topology). For these try to share the DP bandwidth between + * the two lanes. + */ + link_nr = 1; + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + if (tb_tunnel_is_dp(tunnel)) { + link_nr = 0; + break; + } + } + + /* + * DP stream needs the domain to be active so runtime resume + * both ends of the tunnel. + * + * This should bring the routers in the middle active as well + * and keeps the domain from runtime suspending while the DP + * tunnel is active. 
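
/*
 * Editor's note: the pm_runtime_get_sync() pair above is not dropped at
 * the end of tb_tunnel_dp() on success; the references live as long as
 * the tunnel and are released in tb_deactivate_and_free_tunnel(). A
 * standalone sketch of that ownership pattern, with stub "devices"
 * carrying a plain usage counter:
 */

struct pm_ref { int usage; };

static void pm_get(struct pm_ref *d)             { d->usage++; }
static void pm_put_autosuspend(struct pm_ref *d) { d->usage--; }

static int dp_tunnel_setup(struct pm_ref *in, struct pm_ref *out, int err)
{
	pm_get(in);			/* keep both ends of the tunnel awake */
	pm_get(out);

	if (err) {			/* every failure path drops both refs */
		pm_put_autosuspend(out);
		pm_put_autosuspend(in);
		return err;
	}
	return 0;			/* success: refs dropped at tunnel teardown */
}
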
+ */ + pm_runtime_get_sync(&in->sw->dev); + pm_runtime_get_sync(&out->sw->dev); + if (tb_switch_alloc_dp_resource(in->sw, in)) { tb_port_dbg(in, "no resource available for DP IN, not tunneling\n"); - return; + goto err_rpm_put; } - /* Calculate available bandwidth between in and out */ - available_bw = tb_available_bw(tcm, in, out); - if (available_bw < 0) { - tb_warn(tb, "failed to determine available bandwidth\n"); - return; + /* Make all unused USB3 bandwidth available for the new DP tunnel */ + ret = tb_release_unused_usb3_bandwidth(tb, in, out); + if (ret) { + tb_warn(tb, "failed to release unused bandwidth\n"); + goto err_dealloc_dp; } - tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n", - available_bw); + ret = tb_available_bandwidth(tb, in, out, &available_up, + &available_down); + if (ret) + goto err_reclaim; + + tb_dbg(tb, "available bandwidth for new DP tunnel %u/%u Mb/s\n", + available_up, available_down); - tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw); + tunnel = tb_tunnel_alloc_dp(tb, in, out, link_nr, available_up, + available_down); if (!tunnel) { tb_port_dbg(out, "could not allocate DP tunnel\n"); - goto dealloc_dp; + goto err_reclaim; } if (tb_tunnel_activate(tunnel)) { tb_port_info(out, "DP tunnel activation failed, aborting\n"); - tb_tunnel_free(tunnel); - goto dealloc_dp; + goto err_free; } list_add_tail(&tunnel->list, &tcm->tunnel_list); + tb_reclaim_usb3_bandwidth(tb, in, out); + /* + * In case of DP tunnel exists, change host router's 1st children + * TMU mode to HiFi for CL0s to work. + */ + tb_switch_enable_tmu_1st_child(tb->root_switch, TB_SWITCH_TMU_RATE_HIFI); + return; -dealloc_dp: +err_free: + tb_tunnel_free(tunnel); +err_reclaim: + tb_reclaim_usb3_bandwidth(tb, in, out); +err_dealloc_dp: tb_switch_dealloc_dp_resource(in->sw, in); +err_rpm_put: + pm_runtime_mark_last_busy(&out->sw->dev); + pm_runtime_put_autosuspend(&out->sw->dev); + pm_runtime_mark_last_busy(&in->sw->dev); + pm_runtime_put_autosuspend(&in->sw->dev); } static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) @@ -692,6 +1080,50 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) tb_tunnel_dp(tb); } +static void tb_disconnect_and_release_dp(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel, *n; + + /* + * Tear down all DP tunnels and release their resources. They + * will be re-established after resume based on plug events. + */ + list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { + if (tb_tunnel_is_dp(tunnel)) + tb_deactivate_and_free_tunnel(tunnel); + } + + while (!list_empty(&tcm->dp_resources)) { + struct tb_port *port; + + port = list_first_entry(&tcm->dp_resources, + struct tb_port, list); + list_del_init(&port->list); + } +} + +static int tb_disconnect_pci(struct tb *tb, struct tb_switch *sw) +{ + struct tb_tunnel *tunnel; + struct tb_port *up; + + up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); + if (WARN_ON(!up)) + return -ENODEV; + + tunnel = tb_find_tunnel(tb, TB_TUNNEL_PCI, NULL, up); + if (WARN_ON(!tunnel)) + return -ENODEV; + + tb_switch_xhci_disconnect(sw); + + tb_tunnel_deactivate(tunnel); + list_del(&tunnel->list); + tb_tunnel_free(tunnel); + return 0; +} + static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) { struct tb_port *up, *down, *port; @@ -724,11 +1156,23 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) return -EIO; } + /* + * PCIe L1 is needed to enable CL0s for Titan Ridge so enable it + * here. 
+ */ + if (tb_switch_pcie_l1_enable(sw)) + tb_sw_warn(sw, "failed to enable PCIe L1 for Titan Ridge\n"); + + if (tb_switch_xhci_connect(sw)) + tb_sw_warn(sw, "failed to connect xHCI\n"); + list_add_tail(&tunnel->list, &tcm->tunnel_list); return 0; } -static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) +static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) { struct tb_cm *tcm = tb_priv(tb); struct tb_port *nhi_port, *dst_port; @@ -740,9 +1184,8 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); mutex_lock(&tb->lock); - tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring, - xd->transmit_path, xd->receive_ring, - xd->receive_path); + tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, transmit_path, + transmit_ring, receive_path, receive_ring); if (!tunnel) { mutex_unlock(&tb->lock); return -ENOMEM; @@ -761,29 +1204,40 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) return 0; } -static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) +static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) { - struct tb_port *dst_port; - struct tb_tunnel *tunnel; + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *nhi_port, *dst_port; + struct tb_tunnel *tunnel, *n; struct tb_switch *sw; sw = tb_to_switch(xd->dev.parent); dst_port = tb_port_at(xd->route, sw); + nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); - /* - * It is possible that the tunnel was already teared down (in - * case of cable disconnect) so it is fine if we cannot find it - * here anymore. - */ - tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port); - tb_deactivate_and_free_tunnel(tunnel); + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { + if (!tb_tunnel_is_dma(tunnel)) + continue; + if (tunnel->src_port != nhi_port || tunnel->dst_port != dst_port) + continue; + + if (tb_tunnel_match_dma(tunnel, transmit_path, transmit_ring, + receive_path, receive_ring)) + tb_deactivate_and_free_tunnel(tunnel); + } } -static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) +static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring) { if (!xd->is_unplugged) { mutex_lock(&tb->lock); - __tb_disconnect_xdomain_paths(tb, xd); + __tb_disconnect_xdomain_paths(tb, xd, transmit_path, + transmit_ring, receive_path, + receive_ring); mutex_unlock(&tb->lock); } return 0; @@ -791,7 +1245,7 @@ static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) /* hotplug handling */ -/** +/* * tb_handle_hotplug() - handle hotplug event * * Executes on tb->wq. 
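
/*
 * Editor's note: __tb_disconnect_xdomain_paths() above is called with
 * (-1, -1, -1, -1) on unplug, and tb_tunnel_match_dma() treats negative
 * values as wildcards so a single call can tear down every DMA tunnel to
 * the peer. A standalone sketch of those wildcard semantics:
 */

#include <stdbool.h>

static bool dma_attr_match(int want, int have)
{
	return want < 0 || want == have;	/* -1 matches anything */
}

static bool dma_tunnel_match(const int want[4], const int have[4])
{
	int i;

	/* order: transmit_path, transmit_ring, receive_path, receive_ring */
	for (i = 0; i < 4; i++)
		if (!dma_attr_match(want[i], have[i]))
			return false;
	return true;
}
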
@@ -803,6 +1257,10 @@ static void tb_handle_hotplug(struct work_struct *work) struct tb_cm *tcm = tb_priv(tb); struct tb_switch *sw; struct tb_port *port; + + /* Bring the domain back from sleep if it was suspended */ + pm_runtime_get_sync(&tb->dev); + mutex_lock(&tb->lock); if (!tcm->hotplug_active) goto out; /* during init, suspend or shutdown */ @@ -826,13 +1284,19 @@ static void tb_handle_hotplug(struct work_struct *work) ev->route, ev->port, ev->unplug); goto put_sw; } + + pm_runtime_get_sync(&sw->dev); + if (ev->unplug) { + tb_retimer_remove_all(port); + if (tb_port_has_remote(port)) { tb_port_dbg(port, "switch unplugged\n"); tb_sw_set_unplugged(port->remote->sw); tb_free_invalid_tunnels(tb); tb_remove_dp_resources(port->remote->sw); tb_switch_tmu_disable(port->remote->sw); + tb_switch_unconfigure_link(port->remote->sw); tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; @@ -849,21 +1313,28 @@ static void tb_handle_hotplug(struct work_struct *work) * tb_xdomain_remove() so setting XDomain as * unplugged here prevents deadlock if they call * tb_xdomain_disable_paths(). We will tear down - * the path below. + * all the tunnels below. */ xd->is_unplugged = true; tb_xdomain_remove(xd); port->xdomain = NULL; - __tb_disconnect_xdomain_paths(tb, xd); + __tb_disconnect_xdomain_paths(tb, xd, -1, -1, -1, -1); tb_xdomain_put(xd); + tb_port_unconfigure_xdomain(port); } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { tb_dp_resource_unavailable(tb, port); + } else if (!port->port) { + tb_sw_dbg(sw, "xHCI disconnect request\n"); + tb_switch_xhci_disconnect(sw); } else { tb_port_dbg(port, "got unplug event for disconnected port, ignoring\n"); } } else if (port->remote) { tb_port_dbg(port, "got plug event for connected port, ignoring\n"); + } else if (!port->port && sw->authorized) { + tb_sw_dbg(sw, "xHCI connect request\n"); + tb_switch_xhci_connect(sw); } else { if (tb_port_is_null(port)) { tb_port_dbg(port, "hotplug: scanning\n"); @@ -875,14 +1346,21 @@ static void tb_handle_hotplug(struct work_struct *work) } } + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + put_sw: tb_switch_put(sw); out: mutex_unlock(&tb->lock); + + pm_runtime_mark_last_busy(&tb->dev); + pm_runtime_put_autosuspend(&tb->dev); + kfree(ev); } -/** +/* * tb_schedule_hotplug_handler() - callback function for the control channel * * Delegates to tb_handle_hotplug. @@ -914,6 +1392,7 @@ static void tb_stop(struct tb *tb) struct tb_tunnel *tunnel; struct tb_tunnel *n; + cancel_delayed_work(&tcm->remove_work); /* tunnels are only present after everything has been initialized */ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { /* @@ -963,8 +1442,13 @@ static int tb_start(struct tb *tb) * ICM firmware upgrade needs running firmware and in native * mode that is not available so disable firmware upgrade of the * root switch. + * + * However, USB4 routers support NVM firmware upgrade if they + * implement the necessary router operations. */ - tb->root_switch->no_nvm_upgrade = true; + tb->root_switch->no_nvm_upgrade = !tb_switch_is_usb4(tb->root_switch); + /* All USB4 routers support runtime PM */ + tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch); ret = tb_switch_configure(tb->root_switch); if (ret) { @@ -979,12 +1463,20 @@ static int tb_start(struct tb *tb) return ret; } + /* + * To support highest CLx state, we set host router's TMU to + * Normal mode. 
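
/*
 * Editor's note: in the hotplug handler above, an event whose port
 * number is zero (!port->port) addresses the router itself rather than
 * an adapter; the patch uses it for the xHCI connect/disconnect
 * handover. A stripped-down sketch of that dispatch with hypothetical
 * stubs:
 */

#include <stdbool.h>

static void xhci_connect(void)    { }
static void xhci_disconnect(void) { }
static void scan_port(void)       { }

static void handle_event(unsigned int port_no, bool unplug, bool authorized)
{
	if (port_no == 0) {		/* router-level request, not a port */
		if (unplug)
			xhci_disconnect();
		else if (authorized)	/* only hand off when authorized */
			xhci_connect();
		return;
	}
	if (!unplug)
		scan_port();		/* normal adapter plug event */
}
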
+ */ + tb_switch_tmu_configure(tb->root_switch, TB_SWITCH_TMU_RATE_NORMAL, + false); /* Enable TMU if it is off */ tb_switch_tmu_enable(tb->root_switch); /* Full scan to discover devices added before the driver was loaded. */ tb_scan_switch(tb->root_switch); /* Find out tunnels created by the boot firmware */ - tb_discover_tunnels(tb->root_switch); + tb_discover_tunnels(tb); + /* Add DP resources from the DP tunnels created by the boot firmware */ + tb_discover_dp_resources(tb); /* * If the boot firmware did not create USB 3.x tunnels create them * now for the whole topology. @@ -1006,7 +1498,8 @@ static int tb_suspend_noirq(struct tb *tb) struct tb_cm *tcm = tb_priv(tb); tb_dbg(tb, "suspending...\n"); - tb_switch_suspend(tb->root_switch); + tb_disconnect_and_release_dp(tb); + tb_switch_suspend(tb->root_switch, false); tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ tb_dbg(tb, "suspend finished\n"); @@ -1016,18 +1509,46 @@ static int tb_suspend_noirq(struct tb *tb) static void tb_restore_children(struct tb_switch *sw) { struct tb_port *port; + int ret; + + /* No need to restore if the router is already unplugged */ + if (sw->is_unplugged) + return; + + /* + * CL0s and CL1 are enabled and supported together. + * Silently ignore CLx re-enabling in case CLx is not supported. + */ + ret = tb_switch_enable_clx(sw, TB_CL1); + if (ret && ret != -EOPNOTSUPP) + tb_sw_warn(sw, "failed to re-enable %s on upstream port\n", + tb_switch_clx_name(TB_CL1)); + + if (tb_switch_is_clx_enabled(sw, TB_CL1)) + /* + * To support highest CLx state, we set router's TMU to + * Normal-Uni mode. + */ + tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true); + else + /* If CLx disabled, configure router's TMU to HiFi-Bidir mode*/ + tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false); if (tb_enable_tmu(sw)) tb_sw_warn(sw, "failed to restore TMU configuration\n"); tb_switch_for_each_port(sw, port) { - if (!tb_port_has_remote(port)) + if (!tb_port_has_remote(port) && !port->xdomain) continue; - if (tb_switch_lane_bonding_enable(port->remote->sw)) - dev_warn(&sw->dev, "failed to restore lane bonding\n"); + if (port->remote) { + tb_switch_lane_bonding_enable(port->remote->sw); + tb_switch_configure_link(port->remote->sw); - tb_restore_children(port->remote->sw); + tb_restore_children(port->remote->sw); + } else if (port->xdomain) { + tb_port_configure_xdomain(port, port->xdomain); + } } } @@ -1035,18 +1556,43 @@ static int tb_resume_noirq(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel, *n; + unsigned int usb3_delay = 0; + LIST_HEAD(tunnels); tb_dbg(tb, "resuming...\n"); /* remove any pci devices the firmware might have setup */ - tb_switch_reset(tb, 0); + tb_switch_reset(tb->root_switch); tb_switch_resume(tb->root_switch); tb_free_invalid_tunnels(tb); tb_free_unplugged_children(tb->root_switch); tb_restore_children(tb->root_switch); - list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) + + /* + * If we get here from suspend to disk the boot firmware or the + * restore kernel might have created tunnels of its own. Since + * we cannot be sure they are usable for us we find and tear + * them down. 
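
/*
 * Editor's note: the TMU API above is deliberately two-step:
 * tb_switch_tmu_configure() only records the requested rate/direction
 * (the rate_request/unidirectional_request fields documented in struct
 * tb_switch_tmu below), and a later tb_switch_tmu_enable() commits the
 * request to hardware. A standalone mirror of that request/commit split:
 */

#include <stdbool.h>

struct tmu_state {
	int rate, rate_request;
	bool unidirectional, unidirectional_request;
};

static void tmu_configure(struct tmu_state *tmu, int rate, bool uni)
{
	tmu->rate_request = rate;		/* nothing touches hardware yet */
	tmu->unidirectional_request = uni;
}

static void tmu_enable(struct tmu_state *tmu)
{
	/* program the hardware here, then mirror the committed setting */
	tmu->rate = tmu->rate_request;
	tmu->unidirectional = tmu->unidirectional_request;
}
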
+ */ + tb_switch_discover_tunnels(tb->root_switch, &tunnels, false); + list_for_each_entry_safe_reverse(tunnel, n, &tunnels, list) { + if (tb_tunnel_is_usb3(tunnel)) + usb3_delay = 500; + tb_tunnel_deactivate(tunnel); + tb_tunnel_free(tunnel); + } + + /* Re-create our tunnels now */ + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { + /* USB3 requires delay before it can be re-activated */ + if (tb_tunnel_is_usb3(tunnel)) { + msleep(usb3_delay); + /* Only need to do it once */ + usb3_delay = 0; + } tb_tunnel_restart(tunnel); + } if (!list_empty(&tcm->tunnel_list)) { /* * the pcie links need some time to get going. @@ -1071,7 +1617,9 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw) if (tb_is_upstream_port(port)) continue; if (port->xdomain && port->xdomain->is_unplugged) { + tb_retimer_remove_all(port); tb_xdomain_remove(port->xdomain); + tb_port_unconfigure_xdomain(port); port->xdomain = NULL; ret++; } else if (port->remote) { @@ -1082,6 +1630,22 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw) return ret; } +static int tb_freeze_noirq(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + tcm->hotplug_active = false; + return 0; +} + +static int tb_thaw_noirq(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + tcm->hotplug_active = true; + return 0; +} + static void tb_complete(struct tb *tb) { /* @@ -1095,33 +1659,159 @@ static void tb_complete(struct tb *tb) mutex_unlock(&tb->lock); } +static int tb_runtime_suspend(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + mutex_lock(&tb->lock); + tb_switch_suspend(tb->root_switch, true); + tcm->hotplug_active = false; + mutex_unlock(&tb->lock); + + return 0; +} + +static void tb_remove_work(struct work_struct *work) +{ + struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work); + struct tb *tb = tcm_to_tb(tcm); + + mutex_lock(&tb->lock); + if (tb->root_switch) { + tb_free_unplugged_children(tb->root_switch); + tb_free_unplugged_xdomains(tb->root_switch); + } + mutex_unlock(&tb->lock); +} + +static int tb_runtime_resume(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel, *n; + + mutex_lock(&tb->lock); + tb_switch_resume(tb->root_switch); + tb_free_invalid_tunnels(tb); + tb_restore_children(tb->root_switch); + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) + tb_tunnel_restart(tunnel); + tcm->hotplug_active = true; + mutex_unlock(&tb->lock); + + /* + * Schedule cleanup of any unplugged devices. Run this in a + * separate thread to avoid possible deadlock if the device + * removal runtime resumes the unplugged device. + */ + queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); + return 0; +} + static const struct tb_cm_ops tb_cm_ops = { .start = tb_start, .stop = tb_stop, .suspend_noirq = tb_suspend_noirq, .resume_noirq = tb_resume_noirq, + .freeze_noirq = tb_freeze_noirq, + .thaw_noirq = tb_thaw_noirq, .complete = tb_complete, + .runtime_suspend = tb_runtime_suspend, + .runtime_resume = tb_runtime_resume, .handle_event = tb_handle_event, + .disapprove_switch = tb_disconnect_pci, .approve_switch = tb_tunnel_pci, .approve_xdomain_paths = tb_approve_xdomain_paths, .disconnect_xdomain_paths = tb_disconnect_xdomain_paths, }; +/* + * During suspend the Thunderbolt controller is reset and all PCIe + * tunnels are lost. The NHI driver will try to reestablish all tunnels + * during resume. 
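
/*
 * Editor's note: the resume path above first tears down tunnels left
 * behind by the boot or restore kernel and, if a USB3 tunnel was among
 * them, waits 500 ms once before re-activating its own USB3 tunnel. A
 * standalone sketch of that pay-the-delay-once loop, parameterized with
 * callbacks so it stays self-contained:
 */

static void restart_tunnels(int n, int (*is_usb3)(int), void (*restart)(int),
			    void (*wait_ms)(unsigned int), int saw_foreign_usb3)
{
	unsigned int usb3_delay = saw_foreign_usb3 ? 500 : 0;
	int i;

	for (i = 0; i < n; i++) {
		if (is_usb3(i) && usb3_delay) {
			wait_ms(usb3_delay);	/* let the link settle */
			usb3_delay = 0;		/* only the first USB3 tunnel waits */
		}
		restart(i);
	}
}
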
This adds device links between the tunneled PCIe + * downstream ports and the NHI so that the device core will make sure + * NHI is resumed first before the rest. + */ +static void tb_apple_add_links(struct tb_nhi *nhi) +{ + struct pci_dev *upstream, *pdev; + + if (!x86_apple_machine) + return; + + switch (nhi->pdev->device) { + case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: + break; + default: + return; + } + + upstream = pci_upstream_bridge(nhi->pdev); + while (upstream) { + if (!pci_is_pcie(upstream)) + return; + if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM) + break; + upstream = pci_upstream_bridge(upstream); + } + + if (!upstream) + return; + + /* + * For each hotplug downstream port, create add device link + * back to NHI so that PCIe tunnels can be re-established after + * sleep. + */ + for_each_pci_bridge(pdev, upstream->subordinate) { + const struct device_link *link; + + if (!pci_is_pcie(pdev)) + continue; + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM || + !pdev->is_hotplug_bridge) + continue; + + link = device_link_add(&pdev->dev, &nhi->pdev->dev, + DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_PM_RUNTIME); + if (link) { + dev_dbg(&nhi->pdev->dev, "created link from %s\n", + dev_name(&pdev->dev)); + } else { + dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", + dev_name(&pdev->dev)); + } + } +} + struct tb *tb_probe(struct tb_nhi *nhi) { struct tb_cm *tcm; struct tb *tb; - tb = tb_domain_alloc(nhi, sizeof(*tcm)); + tb = tb_domain_alloc(nhi, TB_TIMEOUT, sizeof(*tcm)); if (!tb) return NULL; - tb->security_level = TB_SECURITY_USER; + if (tb_acpi_may_tunnel_pcie()) + tb->security_level = TB_SECURITY_USER; + else + tb->security_level = TB_SECURITY_NOPCIE; + tb->cm_ops = &tb_cm_ops; tcm = tb_priv(tb); INIT_LIST_HEAD(&tcm->tunnel_list); INIT_LIST_HEAD(&tcm->dp_resources); + INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); + + tb_dbg(tb, "using software connection manager\n"); + + tb_apple_add_links(nhi); + tb_acpi_add_links(nhi); return tb; } diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 2eb2bcd3cca3..f9786976f5ec 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -13,33 +13,58 @@ #include <linux/pci.h> #include <linux/thunderbolt.h> #include <linux/uuid.h> +#include <linux/bitfield.h> #include "tb_regs.h" #include "ctl.h" #include "dma_port.h" +#define NVM_MIN_SIZE SZ_32K +#define NVM_MAX_SIZE SZ_512K +#define NVM_DATA_DWORDS 16 + /** - * struct tb_switch_nvm - Structure holding switch NVM information + * struct tb_nvm - Structure holding NVM information + * @dev: Owner of the NVM * @major: Major version number of the active NVM portion * @minor: Minor version number of the active NVM portion * @id: Identifier used with both NVM portions * @active: Active portion NVMem device + * @active_size: Size in bytes of the active NVM * @non_active: Non-active portion NVMem device * @buf: Buffer where the NVM image is stored before it is written to * the actual NVM flash device + * @buf_data_start: Where the actual image starts after skipping + * possible headers * @buf_data_size: Number of bytes actually consumed by the new NVM * image - * @authenticating: The switch is authenticating the new NVM + * @authenticating: The device is authenticating the new NVM + * @flushed: The image has been flushed to the storage area + * @vops: Router vendor specific NVM operations (optional) + * + * The 
user of this structure needs to handle serialization of possible + * concurrent access. */ -struct tb_switch_nvm { - u8 major; - u8 minor; +struct tb_nvm { + struct device *dev; + u32 major; + u32 minor; int id; struct nvmem_device *active; + size_t active_size; struct nvmem_device *non_active; void *buf; + void *buf_data_start; size_t buf_data_size; bool authenticating; + bool flushed; + const struct tb_nvm_vendor_ops *vops; +}; + +enum tb_nvm_write_ops { + WRITE_AND_AUTHENTICATE = 1, + WRITE_ONLY = 2, + AUTHENTICATE_ONLY = 3, }; #define TB_SWITCH_KEY_SIZE 32 @@ -67,15 +92,31 @@ enum tb_switch_tmu_rate { * @cap: Offset to the TMU capability (%0 if not found) * @has_ucap: Does the switch support uni-directional mode * @rate: TMU refresh rate related to upstream switch. In case of root - * switch this holds the domain rate. + * switch this holds the domain rate. Reflects the HW setting. * @unidirectional: Is the TMU in uni-directional or bi-directional mode - * related to upstream switch. Don't case for root switch. + * related to upstream switch. Don't care for root switch. + * Reflects the HW setting. + * @unidirectional_request: Is the new TMU mode: uni-directional or bi-directional + * that is requested to be set. Related to upstream switch. + * Don't care for root switch. + * @rate_request: TMU new refresh rate related to upstream switch that is + * requested to be set. In case of root switch, this holds + * the new domain rate that is requested to be set. */ struct tb_switch_tmu { int cap; bool has_ucap; enum tb_switch_tmu_rate rate; bool unidirectional; + bool unidirectional_request; + enum tb_switch_tmu_rate rate_request; +}; + +enum tb_clx { + TB_CLX_DISABLE, + /* CL0s and CL1 are enabled and supported together */ + TB_CL1 = BIT(0), + TB_CL2 = BIT(1), }; /** @@ -97,9 +138,12 @@ struct tb_switch_tmu { * @device_name: Name of the device (or %NULL if not known) * @link_speed: Speed of the link in Gb/s * @link_width: Width of the link (1 or 2) + * @link_usb4: Upstream link is USB4 * @generation: Switch Thunderbolt generation * @cap_plug_events: Offset to the plug events capability (%0 if not found) + * @cap_vsec_tmu: Offset to the TMU vendor specific capability (%0 if not found) * @cap_lc: Offset to the link controller capability (%0 if not found) + * @cap_lp: Offset to the low power (CLx for TBT) capability (%0 if not found) * @is_unplugged: The switch is going away * @drom: DROM of the switch (%NULL if not found) * @nvm: Pointer to the NVM if the switch has one (%NULL otherwise) @@ -109,6 +153,7 @@ struct tb_switch_tmu { * @rpm: The switch supports runtime PM * @authorized: Whether the switch is authorized by user or policy * @security_level: Switch supported security level + * @debugfs_dir: Pointer to the debugfs structure * @key: Contains the key used to challenge the device or %NULL if not * supported. Size of the key is %TB_SWITCH_KEY_SIZE. 
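
/*
 * Editor's note: enum tb_nvm_write_ops above models the three user
 * choices for an NVM update. One plausible dispatch, with hypothetical
 * flush_image()/start_auth() helpers standing in for the vendor-specific
 * operations; the flushed check mirrors the @flushed field documented in
 * struct tb_nvm and is an assumption about ordering, not quoted code:
 */

#include <errno.h>
#include <stdbool.h>

enum nvm_write_ops { WRITE_AND_AUTHENTICATE = 1, WRITE_ONLY, AUTHENTICATE_ONLY };

static int flush_image(bool *flushed) { *flushed = true; return 0; }
static int start_auth(void)           { return 0; }

static int nvm_update(enum nvm_write_ops op, bool *flushed)
{
	int ret;

	switch (op) {
	case WRITE_ONLY:
		return flush_image(flushed);	/* burn image, authenticate later */
	case WRITE_AND_AUTHENTICATE:
		ret = flush_image(flushed);
		return ret ? ret : start_auth();
	case AUTHENTICATE_ONLY:
		/* assumption: an image must have been flushed first */
		return *flushed ? start_auth() : -EINVAL;
	}
	return -EINVAL;
}
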
* @connection_id: Connection ID used with ICM messaging @@ -117,9 +162,19 @@ struct tb_switch_tmu { * @depth: Depth in the chain this switch is connected (ICM only) * @rpm_complete: Completion used to wait for runtime resume to * complete (ICM only) + * @quirks: Quirks used for this Thunderbolt switch + * @credit_allocation: Are the below buffer allocation parameters valid + * @max_usb3_credits: Router preferred number of buffers for USB 3.x + * @min_dp_aux_credits: Router preferred minimum number of buffers for DP AUX + * @min_dp_main_credits: Router preferred minimum number of buffers for DP MAIN + * @max_pcie_credits: Router preferred number of buffers for PCIe + * @max_dma_credits: Router preferred number of buffers for DMA/P2P + * @clx: CLx state on the upstream link of the router * * When the switch is being added or removed to the domain (other * switches) you need to have domain lock held. + * + * In USB4 terminology this structure represents a router. */ struct tb_switch { struct device dev; @@ -136,24 +191,36 @@ struct tb_switch { const char *device_name; unsigned int link_speed; unsigned int link_width; + bool link_usb4; unsigned int generation; int cap_plug_events; + int cap_vsec_tmu; int cap_lc; + int cap_lp; bool is_unplugged; u8 *drom; - struct tb_switch_nvm *nvm; + struct tb_nvm *nvm; bool no_nvm_upgrade; bool safe_mode; bool boot; bool rpm; unsigned int authorized; enum tb_security_level security_level; + struct dentry *debugfs_dir; u8 *key; u8 connection_id; u8 connection_key; u8 link; u8 depth; struct completion rpm_complete; + unsigned long quirks; + bool credit_allocation; + unsigned int max_usb3_credits; + unsigned int min_dp_aux_credits; + unsigned int min_dp_main_credits; + unsigned int max_pcie_credits; + unsigned int max_dma_credits; + enum tb_clx clx; }; /** @@ -166,8 +233,9 @@ struct tb_switch { * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present) * @cap_adap: Offset of the adapter specific capability (%0 if not present) * @cap_usb4: Offset to the USB4 port capability (%0 if not present) + * @usb4: Pointer to the USB4 port structure (only if @cap_usb4 is != %0) * @port: Port number on switch - * @disabled: Disabled by eeprom + * @disabled: Disabled by eeprom or enabled but not implemented * @bonded: true if the port is bonded (two lanes combined as one) * @dual_link_port: If the switch is connected using two ports, points * to the other port. @@ -175,6 +243,13 @@ struct tb_switch { * @in_hopids: Currently allocated input HopIDs * @out_hopids: Currently allocated output HopIDs * @list: Used to link ports to DP resources list + * @total_credits: Total number of buffers available for this port + * @ctl_credits: Buffers reserved for control path + * @dma_credits: Number of credits allocated for DMA tunneling for all + * DMA paths through this port. + * + * In USB4 terminology this structure represents an adapter (protocol or + * lane adapter). 
*/ struct tb_port { struct tb_regs_port_header config; @@ -185,6 +260,7 @@ struct tb_port { int cap_tmu; int cap_adap; int cap_usb4; + struct usb4_port *usb4; u8 port; bool disabled; bool bonded; @@ -193,6 +269,52 @@ struct tb_port { struct ida in_hopids; struct ida out_hopids; struct list_head list; + unsigned int total_credits; + unsigned int ctl_credits; + unsigned int dma_credits; +}; + +/** + * struct usb4_port - USB4 port device + * @dev: Device for the port + * @port: Pointer to the lane 0 adapter + * @can_offline: Does the port have necessary platform support to moved + * it into offline mode and back + * @offline: The port is currently in offline mode + * @margining: Pointer to margining structure if enabled + */ +struct usb4_port { + struct device dev; + struct tb_port *port; + bool can_offline; + bool offline; +#ifdef CONFIG_USB4_DEBUGFS_MARGINING + struct tb_margining *margining; +#endif +}; + +/** + * tb_retimer: Thunderbolt retimer + * @dev: Device for the retimer + * @tb: Pointer to the domain the retimer belongs to + * @index: Retimer index facing the router USB4 port + * @vendor: Vendor ID of the retimer + * @device: Device ID of the retimer + * @port: Pointer to the lane 0 adapter + * @nvm: Pointer to the NVM if the retimer has one (%NULL otherwise) + * @no_nvm_upgrade: Prevent NVM upgrade of this retimer + * @auth_status: Status of last NVM authentication + */ +struct tb_retimer { + struct device dev; + struct tb *tb; + u8 index; + u32 vendor; + u32 device; + struct tb_port *port; + struct tb_nvm *nvm; + bool no_nvm_upgrade; + u32 auth_status; }; /** @@ -207,6 +329,8 @@ struct tb_port { * @next_hop_index: HopID of the packet when it is routed out from @out_port * @initial_credits: Number of initial flow control credits allocated for * the path + * @nfc_credits: Number of non-flow controlled buffers allocated for the + * @in_port. * * Hop configuration is always done on the IN port of a switch. * in_port and out_port have to be on the same switch. Packets arriving on @@ -226,6 +350,7 @@ struct tb_path_hop { int in_counter_index; int next_hop_index; unsigned int initial_credits; + unsigned int nfc_credits; }; /** @@ -248,7 +373,6 @@ enum tb_path_port { * struct tb_path - a unidirectional path between two ports * @tb: Pointer to the domain structure * @name: Name of the path (used for debugging) - * @nfc_credits: Number of non flow controlled credits allocated for the path * @ingress_shared_buffer: Shared buffering used for ingress ports on the path * @egress_shared_buffer: Shared buffering used for egress ports on the path * @ingress_fc_enable: Flow control for ingress ports on the path @@ -261,6 +385,7 @@ enum tb_path_port { * when deactivating this path * @hops: Path hops * @path_length: How many hops the path uses + * @alloc_hopid: Does this path consume port HopID * * A path consists of a number of hops (see &struct tb_path_hop). 
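
/*
 * Editor's note: the TB_WAKE_ON_* bits above are composed into a single
 * flags word and handed to usb4_switch_set_wake() (or tb_lc_set_wake())
 * before sleep. The exact mix the driver arms per suspend type lives in
 * switch.c, which is not part of this hunk, so the policy below is
 * illustrative only; the constants are duplicated just to keep the
 * snippet standalone:
 */

#define TB_WAKE_ON_CONNECT	(1U << 0)
#define TB_WAKE_ON_DISCONNECT	(1U << 1)
#define TB_WAKE_ON_USB4		(1U << 2)

static unsigned int wake_flags(int runtime)
{
	unsigned int flags = TB_WAKE_ON_USB4;	/* protocol wake */

	if (runtime)		/* plug events must be able to resume the domain */
		flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
	return flags;
}
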
To * establish a PCIe tunnel two paths have to be created between the two @@ -269,7 +394,6 @@ enum tb_path_port { struct tb_path { struct tb *tb; const char *name; - int nfc_credits; enum tb_path_port ingress_shared_buffer; enum tb_path_port egress_shared_buffer; enum tb_path_port ingress_fc_enable; @@ -282,11 +406,24 @@ struct tb_path { bool clear_fc; struct tb_path_hop *hops; int path_length; + bool alloc_hopid; }; /* HopIDs 0-7 are reserved by the Thunderbolt protocol */ #define TB_PATH_MIN_HOPID 8 -#define TB_PATH_MAX_HOPS 7 +/* + * Support paths from the farthest (depth 6) router to the host and back + * to the same level (not necessarily to the same router). + */ +#define TB_PATH_MAX_HOPS (7 * 2) + +/* Possible wake types */ +#define TB_WAKE_ON_CONNECT BIT(0) +#define TB_WAKE_ON_DISCONNECT BIT(1) +#define TB_WAKE_ON_USB4 BIT(2) +#define TB_WAKE_ON_USB3 BIT(3) +#define TB_WAKE_ON_PCIE BIT(4) +#define TB_WAKE_ON_DP BIT(5) /** * struct tb_cm_ops - Connection manager specific operations vector @@ -297,6 +434,8 @@ struct tb_path { * @suspend_noirq: Connection manager specific suspend_noirq * @resume_noirq: Connection manager specific resume_noirq * @suspend: Connection manager specific suspend + * @freeze_noirq: Connection manager specific freeze_noirq + * @thaw_noirq: Connection manager specific thaw_noirq * @complete: Connection manager specific complete * @runtime_suspend: Connection manager specific runtime_suspend * @runtime_resume: Connection manager specific runtime_resume @@ -305,12 +444,21 @@ struct tb_path { * @handle_event: Handle thunderbolt event * @get_boot_acl: Get boot ACL list * @set_boot_acl: Set boot ACL list + * @disapprove_switch: Disapprove switch (disconnect PCIe tunnel) * @approve_switch: Approve switch * @add_switch_key: Add key to switch * @challenge_switch_key: Challenge switch using key * @disconnect_pcie_paths: Disconnects PCIe paths before NVM update * @approve_xdomain_paths: Approve (establish) XDomain DMA paths * @disconnect_xdomain_paths: Disconnect XDomain DMA paths + * @usb4_switch_op: Optional proxy for USB4 router operations. If set + * this will be called whenever USB4 router operation is + * performed. If this returns %-EOPNOTSUPP then the + * native USB4 router operation is called. + * @usb4_switch_nvm_authenticate_status: Optional callback that the CM + * implementation can be used to + * return status of USB4 NVM_AUTH + * router operation. 
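
/*
 * Editor's note: the @usb4_switch_op kerneldoc below spells out a
 * fallback contract: if the connection manager's proxy returns
 * -EOPNOTSUPP, the native router operation runs instead. A minimal
 * sketch of that dispatch, with a hypothetical native_op() stand-in:
 */

#include <errno.h>
#include <stddef.h>

typedef int (*switch_op_fn)(unsigned int opcode);

static int native_op(unsigned int opcode) { (void)opcode; return 0; }

static int do_switch_op(switch_op_fn proxy, unsigned int opcode)
{
	if (proxy) {
		int ret = proxy(opcode);

		if (ret != -EOPNOTSUPP)
			return ret;	/* proxy handled it (or genuinely failed) */
	}
	return native_op(opcode);	/* fall back to the native USB4 op */
}
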
*/ struct tb_cm_ops { int (*driver_ready)(struct tb *tb); @@ -319,6 +467,8 @@ struct tb_cm_ops { int (*suspend_noirq)(struct tb *tb); int (*resume_noirq)(struct tb *tb); int (*suspend)(struct tb *tb); + int (*freeze_noirq)(struct tb *tb); + int (*thaw_noirq)(struct tb *tb); void (*complete)(struct tb *tb); int (*runtime_suspend)(struct tb *tb); int (*runtime_resume)(struct tb *tb); @@ -328,13 +478,23 @@ struct tb_cm_ops { const void *buf, size_t size); int (*get_boot_acl)(struct tb *tb, uuid_t *uuids, size_t nuuids); int (*set_boot_acl)(struct tb *tb, const uuid_t *uuids, size_t nuuids); + int (*disapprove_switch)(struct tb *tb, struct tb_switch *sw); int (*approve_switch)(struct tb *tb, struct tb_switch *sw); int (*add_switch_key)(struct tb *tb, struct tb_switch *sw); int (*challenge_switch_key)(struct tb *tb, struct tb_switch *sw, const u8 *challenge, u8 *response); int (*disconnect_pcie_paths)(struct tb *tb); - int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd); - int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd); + int (*approve_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring); + int (*disconnect_xdomain_paths)(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring); + int (*usb4_switch_op)(struct tb_switch *sw, u16 opcode, u32 *metadata, + u8 *status, const void *tx_data, size_t tx_data_len, + void *rx_data, size_t rx_data_len); + int (*usb4_switch_nvm_authenticate_status)(struct tb_switch *sw, + u32 *status); }; static inline void *tb_priv(struct tb *tb) @@ -412,6 +572,11 @@ static inline bool tb_port_is_null(const struct tb_port *port) return port && port->port && port->config.type == TB_TYPE_PORT; } +static inline bool tb_port_is_nhi(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_NHI; +} + static inline bool tb_port_is_pcie_down(const struct tb_port *port) { return port && port->config.type == TB_TYPE_PCIE_DOWN; @@ -518,7 +683,7 @@ static inline int tb_port_write(struct tb_port *port, const void *buffer, #define __TB_PORT_PRINT(level, _port, fmt, arg...) \ do { \ const struct tb_port *__port = (_port); \ - level(__port->sw->tb, "%llx:%x: " fmt, \ + level(__port->sw->tb, "%llx:%u: " fmt, \ tb_route(__port->sw), __port->port, ## arg); \ } while (0) #define tb_port_WARN(port, fmt, arg...) 
\ @@ -534,29 +699,37 @@ struct tb *icm_probe(struct tb_nhi *nhi); struct tb *tb_probe(struct tb_nhi *nhi); extern struct device_type tb_domain_type; +extern struct device_type tb_retimer_type; extern struct device_type tb_switch_type; +extern struct device_type usb4_port_device_type; int tb_domain_init(void); void tb_domain_exit(void); -void tb_switch_exit(void); int tb_xdomain_init(void); void tb_xdomain_exit(void); -struct tb *tb_domain_alloc(struct tb_nhi *nhi, size_t privsize); +struct tb *tb_domain_alloc(struct tb_nhi *nhi, int timeout_msec, size_t privsize); int tb_domain_add(struct tb *tb); void tb_domain_remove(struct tb *tb); int tb_domain_suspend_noirq(struct tb *tb); int tb_domain_resume_noirq(struct tb *tb); int tb_domain_suspend(struct tb *tb); +int tb_domain_freeze_noirq(struct tb *tb); +int tb_domain_thaw_noirq(struct tb *tb); void tb_domain_complete(struct tb *tb); int tb_domain_runtime_suspend(struct tb *tb); int tb_domain_runtime_resume(struct tb *tb); +int tb_domain_disapprove_switch(struct tb *tb, struct tb_switch *sw); int tb_domain_approve_switch(struct tb *tb, struct tb_switch *sw); int tb_domain_approve_switch_key(struct tb *tb, struct tb_switch *sw); int tb_domain_challenge_switch_key(struct tb *tb, struct tb_switch *sw); int tb_domain_disconnect_pcie_paths(struct tb *tb); -int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd); -int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd); +int tb_domain_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring); +int tb_domain_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd, + int transmit_path, int transmit_ring, + int receive_path, int receive_ring); int tb_domain_disconnect_all_paths(struct tb *tb); static inline struct tb *tb_domain_get(struct tb *tb) @@ -571,6 +744,29 @@ static inline void tb_domain_put(struct tb *tb) put_device(&tb->dev); } +struct tb_nvm *tb_nvm_alloc(struct device *dev); +int tb_nvm_read_version(struct tb_nvm *nvm); +int tb_nvm_validate(struct tb_nvm *nvm); +int tb_nvm_write_headers(struct tb_nvm *nvm); +int tb_nvm_add_active(struct tb_nvm *nvm, nvmem_reg_read_t reg_read); +int tb_nvm_write_buf(struct tb_nvm *nvm, unsigned int offset, void *val, + size_t bytes); +int tb_nvm_add_non_active(struct tb_nvm *nvm, nvmem_reg_write_t reg_write); +void tb_nvm_free(struct tb_nvm *nvm); +void tb_nvm_exit(void); + +typedef int (*read_block_fn)(void *, unsigned int, void *, size_t); +typedef int (*write_block_fn)(void *, unsigned int, const void *, size_t); + +int tb_nvm_read_data(unsigned int address, void *buf, size_t size, + unsigned int retries, read_block_fn read_block, + void *read_block_data); +int tb_nvm_write_data(unsigned int address, const void *buf, size_t size, + unsigned int retries, write_block_fn write_next_block, + void *write_block_data); + +int tb_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size); struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, u64 route); struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb, @@ -578,9 +774,11 @@ struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb, int tb_switch_configure(struct tb_switch *sw); int tb_switch_add(struct tb_switch *sw); void tb_switch_remove(struct tb_switch *sw); -void tb_switch_suspend(struct tb_switch *sw); +void tb_switch_suspend(struct tb_switch *sw, bool runtime); int tb_switch_resume(struct tb_switch *sw); -int 
tb_switch_reset(struct tb *tb, u64 route); +int tb_switch_reset(struct tb_switch *sw); +int tb_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, + u32 value, int timeout_msec); void tb_sw_set_unplugged(struct tb_switch *sw); struct tb_port *tb_switch_find_port(struct tb_switch *sw, enum tb_port_type type); @@ -631,59 +829,80 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw) static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) { - return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; + return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && + sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; } static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw) { - return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; + return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && + sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; } static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw) { - switch (sw->config.device_id) { - case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: - case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: - return true; - default: - return false; + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: + return true; + } } + return false; } static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw) { - switch (sw->config.device_id) { - case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: - case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: - return true; - default: - return false; + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: + return true; + } } + return false; } static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw) { - switch (sw->config.device_id) { - case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: - case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: - case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: - case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: - return true; - default: - return false; + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: + return true; + } } + return false; } static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw) { - switch (sw->config.device_id) { - case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: - case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: - case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: - return true; - default: - return false; + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: + return true; + } + } + return false; +} + +static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw) +{ + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_TGL_NHI0: + case PCI_DEVICE_ID_INTEL_TGL_NHI1: + case PCI_DEVICE_ID_INTEL_TGL_H_NHI0: + case PCI_DEVICE_ID_INTEL_TGL_H_NHI1: + return true; + } } + return false; } /** @@ -713,6 
+932,8 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw) int tb_switch_lane_bonding_enable(struct tb_switch *sw); void tb_switch_lane_bonding_disable(struct tb_switch *sw); +int tb_switch_configure_link(struct tb_switch *sw); +void tb_switch_unconfigure_link(struct tb_switch *sw); bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); @@ -722,18 +943,77 @@ int tb_switch_tmu_init(struct tb_switch *sw); int tb_switch_tmu_post_time(struct tb_switch *sw); int tb_switch_tmu_disable(struct tb_switch *sw); int tb_switch_tmu_enable(struct tb_switch *sw); +void tb_switch_tmu_configure(struct tb_switch *sw, + enum tb_switch_tmu_rate rate, + bool unidirectional); +void tb_switch_enable_tmu_1st_child(struct tb_switch *sw, + enum tb_switch_tmu_rate rate); +/** + * tb_switch_tmu_is_enabled() - Checks if the specified TMU mode is enabled + * @sw: Router whose TMU mode to check + * @unidirectional: If uni-directional (bi-directional otherwise) + * + * Return true if hardware TMU configuration matches the one passed in + * as parameter. That is HiFi/Normal and either uni-directional or bi-directional. + */ +static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw, + bool unidirectional) +{ + return sw->tmu.rate == sw->tmu.rate_request && + sw->tmu.unidirectional == unidirectional; +} -static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw) +static inline const char *tb_switch_clx_name(enum tb_clx clx) { - return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI && - !sw->tmu.unidirectional; + switch (clx) { + /* CL0s and CL1 are enabled and supported together */ + case TB_CL1: + return "CL0s/CL1"; + default: + return "unknown"; + } } +int tb_switch_enable_clx(struct tb_switch *sw, enum tb_clx clx); +int tb_switch_disable_clx(struct tb_switch *sw, enum tb_clx clx); + +/** + * tb_switch_is_clx_enabled() - Checks if the CLx is enabled + * @sw: Router to check for the CLx + * @clx: The CLx state to check for + * + * Checks if the specified CLx is enabled on the router upstream link. + * Not applicable for a host router. 
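
/*
 * Editor's note: the CLx and TMU helpers around here combine into the
 * pattern tb.c uses in both the scan and resume paths: try to enable
 * CL1, tolerate -EOPNOTSUPP, then pick the TMU mode from the outcome.
 * A standalone sketch with stub helpers:
 */

#include <errno.h>
#include <stdbool.h>

enum { RATE_HIFI, RATE_NORMAL };

static int  clx_enable(void)  { return -EOPNOTSUPP; }	/* e.g. hw without CLx */
static bool clx_enabled(void) { return false; }
static void warn(const char *msg) { (void)msg; }
static void tmu_pick(int rate, bool uni) { (void)rate; (void)uni; }

static void configure_link_power(void)
{
	int ret = clx_enable();

	if (ret && ret != -EOPNOTSUPP)
		warn("failed to enable CL0s/CL1");	/* real failures only */

	if (clx_enabled())
		tmu_pick(RATE_NORMAL, true);	/* uni-directional TMU for CLx */
	else
		tmu_pick(RATE_HIFI, false);	/* bi-directional HiFi otherwise */
}
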
+ */ +static inline bool tb_switch_is_clx_enabled(const struct tb_switch *sw, + enum tb_clx clx) +{ + return sw->clx == clx; +} + +/** + * tb_switch_is_clx_supported() - Is CLx supported on this type of router + * @sw: The router to check CLx support for + */ +static inline bool tb_switch_is_clx_supported(const struct tb_switch *sw) +{ + return tb_switch_is_usb4(sw) || tb_switch_is_titan_ridge(sw); +} + +int tb_switch_mask_clx_objections(struct tb_switch *sw); + +int tb_switch_pcie_l1_enable(struct tb_switch *sw); + +int tb_switch_xhci_connect(struct tb_switch *sw); +void tb_switch_xhci_disconnect(struct tb_switch *sw); + +int tb_port_state(struct tb_port *port); int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_port_add_nfc_credits(struct tb_port *port, int credits); -int tb_port_set_initial_credits(struct tb_port *port, u32 credits); int tb_port_clear_counter(struct tb_port *port, int counter); int tb_port_unlock(struct tb_port *port); +int tb_port_enable(struct tb_port *port); +int tb_port_disable(struct tb_port *port); int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid); void tb_port_release_in_hopid(struct tb_port *port, int hopid); int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid); @@ -741,9 +1021,39 @@ void tb_port_release_out_hopid(struct tb_port *port, int hopid); struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, struct tb_port *prev); +static inline bool tb_port_use_credit_allocation(const struct tb_port *port) +{ + return tb_port_is_null(port) && port->sw->credit_allocation; +} + +/** + * tb_for_each_port_on_path() - Iterate over each port on path + * @src: Source port + * @dst: Destination port + * @p: Port used as iterator + * + * Walks over each port on path from @src to @dst. 
+ */ +#define tb_for_each_port_on_path(src, dst, p) \ + for ((p) = tb_next_port_on_path((src), (dst), NULL); (p); \ + (p) = tb_next_port_on_path((src), (dst), (p))) + +int tb_port_get_link_speed(struct tb_port *port); +int tb_port_get_link_width(struct tb_port *port); +int tb_port_set_link_width(struct tb_port *port, unsigned int width); +int tb_port_set_lane_bonding(struct tb_port *port, bool bonding); +int tb_port_lane_bonding_enable(struct tb_port *port); +void tb_port_lane_bonding_disable(struct tb_port *port); +int tb_port_wait_for_link_width(struct tb_port *port, int width, + int timeout_msec); +int tb_port_update_credits(struct tb_port *port); +bool tb_port_is_clx_enabled(struct tb_port *port, enum tb_clx clx); + int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); +int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset); int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); +int tb_port_next_cap(struct tb_port *port, unsigned int offset); bool tb_port_is_enabled(struct tb_port *port); bool tb_usb3_port_is_enabled(struct tb_port *port); @@ -761,7 +1071,8 @@ int tb_dp_port_enable(struct tb_port *port, bool enable); struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, struct tb_port *dst, int dst_hopid, - struct tb_port **last, const char *name); + struct tb_port **last, const char *name, + bool alloc_hopid); struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, struct tb_port *dst, int dst_hopid, int link_nr, const char *name); @@ -769,20 +1080,41 @@ void tb_path_free(struct tb_path *path); int tb_path_activate(struct tb_path *path); void tb_path_deactivate(struct tb_path *path); bool tb_path_is_invalid(struct tb_path *path); -bool tb_path_switch_on_path(const struct tb_path *path, - const struct tb_switch *sw); +bool tb_path_port_on_path(const struct tb_path *path, + const struct tb_port *port); + +/** + * tb_path_for_each_hop() - Iterate over each hop on path + * @path: Path whose hops to iterate + * @hop: Hop used as iterator + * + * Iterates over each hop on path. 
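
/*
 * Editor's note: a small usage example for the tb_for_each_port_on_path()
 * iterator defined above. The helper below is hypothetical (not part of
 * the patch) and assumes struct tb_port and the macro from tb.h are in
 * scope:
 */

static int count_ports_on_path(struct tb_port *src, struct tb_port *dst)
{
	struct tb_port *p;
	int n = 0;

	tb_for_each_port_on_path(src, dst, p)
		n++;		/* visits every lane adapter from src to dst */
	return n;
}
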
+ */ +#define tb_path_for_each_hop(path, hop) \ + for ((hop) = &(path)->hops[0]; \ + (hop) <= &(path)->hops[(path)->path_length - 1]; (hop)++) int tb_drom_read(struct tb_switch *sw); int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); -int tb_lc_configure_link(struct tb_switch *sw); -void tb_lc_unconfigure_link(struct tb_switch *sw); +int tb_lc_configure_port(struct tb_port *port); +void tb_lc_unconfigure_port(struct tb_port *port); +int tb_lc_configure_xdomain(struct tb_port *port); +void tb_lc_unconfigure_xdomain(struct tb_port *port); +int tb_lc_start_lane_initialization(struct tb_port *port); +bool tb_lc_is_clx_supported(struct tb_port *port); +bool tb_lc_is_usb_plugged(struct tb_port *port); +bool tb_lc_is_xhci_connected(struct tb_port *port); +int tb_lc_xhci_connect(struct tb_port *port); +void tb_lc_xhci_disconnect(struct tb_port *port); +int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags); int tb_lc_set_sleep(struct tb_switch *sw); bool tb_lc_lane_bonding_possible(struct tb_switch *sw); bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in); int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in); int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in); +int tb_lc_force_power(struct tb_switch *sw); static inline int tb_route_length(u64 route) { @@ -802,6 +1134,7 @@ static inline u64 tb_downstream_route(struct tb_port *port) | ((u64) port->port << (port->sw->config.depth * 8)); } +bool tb_is_xdomain_enabled(void); bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type, const void *buf, size_t size); struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, @@ -812,20 +1145,44 @@ void tb_xdomain_remove(struct tb_xdomain *xd); struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link, u8 depth); +static inline struct tb_switch *tb_xdomain_parent(struct tb_xdomain *xd) +{ + return tb_to_switch(xd->dev.parent); +} + +int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf, + size_t size); +int tb_retimer_scan(struct tb_port *port, bool add); +void tb_retimer_remove_all(struct tb_port *port); + +static inline bool tb_is_retimer(const struct device *dev) +{ + return dev->type == &tb_retimer_type; +} + +static inline struct tb_retimer *tb_to_retimer(struct device *dev) +{ + if (tb_is_retimer(dev)) + return container_of(dev, struct tb_retimer, dev); + return NULL; +} + int usb4_switch_setup(struct tb_switch *sw); int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size); -int usb4_switch_configure_link(struct tb_switch *sw); -void usb4_switch_unconfigure_link(struct tb_switch *sw); bool usb4_switch_lane_bonding_possible(struct tb_switch *sw); +int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags); int usb4_switch_set_sleep(struct tb_switch *sw); int usb4_switch_nvm_sector_size(struct tb_switch *sw); int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size); +int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address); int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, const void *buf, size_t size); int usb4_switch_nvm_authenticate(struct tb_switch *sw); +int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status); +int usb4_switch_credits_init(struct tb_switch *sw); bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct 
tb_port *in); int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); @@ -833,6 +1190,121 @@ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, const struct tb_port *port); struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, const struct tb_port *port); +int usb4_switch_add_ports(struct tb_switch *sw); +void usb4_switch_remove_ports(struct tb_switch *sw); int usb4_port_unlock(struct tb_port *port); +int usb4_port_hotplug_enable(struct tb_port *port); +int usb4_port_configure(struct tb_port *port); +void usb4_port_unconfigure(struct tb_port *port); +int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd); +void usb4_port_unconfigure_xdomain(struct tb_port *port); +int usb4_port_router_offline(struct tb_port *port); +int usb4_port_router_online(struct tb_port *port); +int usb4_port_enumerate_retimers(struct tb_port *port); +bool usb4_port_clx_supported(struct tb_port *port); +int usb4_port_margining_caps(struct tb_port *port, u32 *caps); +int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, + unsigned int ber_level, bool timing, bool right_high, + u32 *results); +int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing, + bool right_high, u32 counter); +int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors); + +int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index); +int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, + u8 size); +int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg, + const void *buf, u8 size); +int usb4_port_retimer_is_last(struct tb_port *port, u8 index); +int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index); +int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, + unsigned int address); +int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, + unsigned int address, const void *buf, + size_t size); +int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index); +int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, + u32 *status); +int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index, + unsigned int address, void *buf, size_t size); + +int usb4_usb3_port_max_link_rate(struct tb_port *port); +int usb4_usb3_port_actual_link_rate(struct tb_port *port); +int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw); +int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw); +int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw); + +static inline bool tb_is_usb4_port_device(const struct device *dev) +{ + return dev->type == &usb4_port_device_type; +} + +static inline struct usb4_port *tb_to_usb4_port_device(struct device *dev) +{ + if (tb_is_usb4_port_device(dev)) + return container_of(dev, struct usb4_port, dev); + return NULL; +} + +struct usb4_port *usb4_port_device_add(struct tb_port *port); +void usb4_port_device_remove(struct usb4_port *usb4); +int usb4_port_device_resume(struct usb4_port *usb4); + +/* Keep link controller awake during update */ +#define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) + +void tb_check_quirks(struct tb_switch *sw); + +#ifdef CONFIG_ACPI +void tb_acpi_add_links(struct tb_nhi *nhi); + +bool tb_acpi_is_native(void); +bool tb_acpi_may_tunnel_usb3(void); +bool tb_acpi_may_tunnel_dp(void); +bool 
tb_acpi_may_tunnel_pcie(void); +bool tb_acpi_is_xdomain_allowed(void); + +int tb_acpi_init(void); +void tb_acpi_exit(void); +int tb_acpi_power_on_retimers(struct tb_port *port); +int tb_acpi_power_off_retimers(struct tb_port *port); +#else +static inline void tb_acpi_add_links(struct tb_nhi *nhi) { } + +static inline bool tb_acpi_is_native(void) { return true; } +static inline bool tb_acpi_may_tunnel_usb3(void) { return true; } +static inline bool tb_acpi_may_tunnel_dp(void) { return true; } +static inline bool tb_acpi_may_tunnel_pcie(void) { return true; } +static inline bool tb_acpi_is_xdomain_allowed(void) { return true; } + +static inline int tb_acpi_init(void) { return 0; } +static inline void tb_acpi_exit(void) { } +static inline int tb_acpi_power_on_retimers(struct tb_port *port) { return 0; } +static inline int tb_acpi_power_off_retimers(struct tb_port *port) { return 0; } +#endif + +#ifdef CONFIG_DEBUG_FS +void tb_debugfs_init(void); +void tb_debugfs_exit(void); +void tb_switch_debugfs_init(struct tb_switch *sw); +void tb_switch_debugfs_remove(struct tb_switch *sw); +void tb_xdomain_debugfs_init(struct tb_xdomain *xd); +void tb_xdomain_debugfs_remove(struct tb_xdomain *xd); +void tb_service_debugfs_init(struct tb_service *svc); +void tb_service_debugfs_remove(struct tb_service *svc); +#else +static inline void tb_debugfs_init(void) { } +static inline void tb_debugfs_exit(void) { } +static inline void tb_switch_debugfs_init(struct tb_switch *sw) { } +static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { } +static inline void tb_xdomain_debugfs_init(struct tb_xdomain *xd) { } +static inline void tb_xdomain_debugfs_remove(struct tb_xdomain *xd) { } +static inline void tb_service_debugfs_init(struct tb_service *svc) { } +static inline void tb_service_debugfs_remove(struct tb_service *svc) { } +#endif + #endif diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h index fc208c567953..33c4c7aed56d 100644 --- a/drivers/thunderbolt/tb_msgs.h +++ b/drivers/thunderbolt/tb_msgs.h @@ -28,6 +28,7 @@ enum tb_cfg_error { TB_CFG_ERROR_LOOP = 8, TB_CFG_ERROR_HEC_ERROR_DETECTED = 12, TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13, + TB_CFG_ERROR_LOCK = 15, }; /* common header */ @@ -105,6 +106,7 @@ enum icm_pkg_code { ICM_APPROVE_XDOMAIN = 0x10, ICM_DISCONNECT_XDOMAIN = 0x11, ICM_PREBOOT_ACL = 0x18, + ICM_USB4_SWITCH_OP = 0x20, }; enum icm_event_code { @@ -342,6 +344,8 @@ struct icm_tr_pkg_driver_ready_response { #define ICM_TR_FLAGS_RTD3 BIT(6) #define ICM_TR_INFO_SLEVEL_MASK GENMASK(2, 0) +#define ICM_TR_INFO_PROTO_VERSION_MASK GENMASK(6, 4) +#define ICM_TR_INFO_PROTO_VERSION_SHIFT 4 #define ICM_TR_INFO_BOOT_ACL_SHIFT 7 #define ICM_TR_INFO_BOOT_ACL_MASK GENMASK(12, 7) @@ -477,6 +481,31 @@ struct icm_icl_event_rtd3_veto { u32 veto_reason; }; +/* USB4 ICM messages */ + +struct icm_usb4_switch_op { + struct icm_pkg_header hdr; + u32 route_hi; + u32 route_lo; + u32 metadata; + u16 opcode; + u16 data_len_valid; + u32 data[16]; +}; + +#define ICM_USB4_SWITCH_DATA_LEN_MASK GENMASK(3, 0) +#define ICM_USB4_SWITCH_DATA_VALID BIT(4) + +struct icm_usb4_switch_op_response { + struct icm_pkg_header hdr; + u32 route_hi; + u32 route_lo; + u32 metadata; + u16 opcode; + u16 status; + u32 data[16]; +}; + /* XDomain messages */ struct tb_xdomain_header { @@ -498,6 +527,10 @@ enum tb_xdp_type { PROPERTIES_CHANGED_RESPONSE, ERROR_RESPONSE, UUID_REQUEST = 12, + LINK_STATE_STATUS_REQUEST = 15, + LINK_STATE_STATUS_RESPONSE, + LINK_STATE_CHANGE_REQUEST, + LINK_STATE_CHANGE_RESPONSE, }; struct 
tb_xdp_header { @@ -506,15 +539,60 @@ struct tb_xdp_header { u32 type; }; +struct tb_xdp_error_response { + struct tb_xdp_header hdr; + u32 error; +}; + +struct tb_xdp_link_state_status { + struct tb_xdp_header hdr; +}; + +struct tb_xdp_link_state_status_response { + union { + struct tb_xdp_error_response err; + struct { + struct tb_xdp_header hdr; + u32 status; + u8 slw; + u8 tlw; + u8 sls; + u8 tls; + }; + }; +}; + +struct tb_xdp_link_state_change { + struct tb_xdp_header hdr; + u8 tlw; + u8 tls; + u16 reserved; +}; + +struct tb_xdp_link_state_change_response { + union { + struct tb_xdp_error_response err; + struct { + struct tb_xdp_header hdr; + u32 status; + }; + }; +}; + struct tb_xdp_uuid { struct tb_xdp_header hdr; }; struct tb_xdp_uuid_response { - struct tb_xdp_header hdr; - uuid_t src_uuid; - u32 src_route_hi; - u32 src_route_lo; + union { + struct tb_xdp_error_response err; + struct { + struct tb_xdp_header hdr; + uuid_t src_uuid; + u32 src_route_hi; + u32 src_route_lo; + }; + }; }; struct tb_xdp_properties { @@ -526,13 +604,18 @@ struct tb_xdp_properties { }; struct tb_xdp_properties_response { - struct tb_xdp_header hdr; - uuid_t src_uuid; - uuid_t dst_uuid; - u16 offset; - u16 data_length; - u32 generation; - u32 data[0]; + union { + struct tb_xdp_error_response err; + struct { + struct tb_xdp_header hdr; + uuid_t src_uuid; + uuid_t dst_uuid; + u16 offset; + u16 data_length; + u32 generation; + u32 data[]; + }; + }; }; /* @@ -551,7 +634,10 @@ struct tb_xdp_properties_changed { }; struct tb_xdp_properties_changed_response { - struct tb_xdp_header hdr; + union { + struct tb_xdp_error_response err; + struct tb_xdp_header hdr; + }; }; enum tb_xdp_error { @@ -562,9 +648,4 @@ enum tb_xdp_error { ERROR_NOT_READY, }; -struct tb_xdp_error_response { - struct tb_xdp_header hdr; - u32 error; -}; - #endif diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index c29c5075525a..86319dca0f8c 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -33,12 +33,13 @@ enum tb_switch_cap { enum tb_switch_vse_cap { TB_VSE_CAP_PLUG_EVENTS = 0x01, /* also EEPROM */ TB_VSE_CAP_TIME2 = 0x03, - TB_VSE_CAP_IECS = 0x04, + TB_VSE_CAP_CP_LP = 0x04, TB_VSE_CAP_LINK_CONTROLLER = 0x06, /* also IECS */ }; enum tb_port_cap { TB_PORT_CAP_PHY = 0x01, + TB_PORT_CAP_POWER = 0x02, TB_PORT_CAP_TIME1 = 0x03, TB_PORT_CAP_ADAP = 0x04, TB_PORT_CAP_VSE = 0x05, @@ -93,6 +94,20 @@ struct tb_cap_extended_long { u16 length; } __packed; +/** + * struct tb_cap_any - Structure capable of holding every capability + * @basic: Basic capability + * @extended_short: Vendor specific capability + * @extended_long: Vendor specific extended capability + */ +struct tb_cap_any { + union { + struct tb_cap_basic basic; + struct tb_cap_extended_short extended_short; + struct tb_cap_extended_long extended_long; + }; +} __packed; + /* capabilities */ struct tb_cap_link_controller { @@ -118,11 +133,11 @@ struct tb_cap_phy { } __packed; struct tb_eeprom_ctl { - bool clock:1; /* send pulse to transfer one bit */ - bool access_low:1; /* set to 0 before access */ - bool data_out:1; /* to eeprom */ - bool data_in:1; /* from eeprom */ - bool access_high:1; /* set to 1 before access */ + bool fl_sk:1; /* send pulse to transfer one bit */ + bool fl_cs:1; /* set to 0 before access */ + bool fl_di:1; /* to eeprom */ + bool fl_do:1; /* from eeprom */ + bool bit_banging_enable:1; /* set to 1 before access */ bool not_present:1; /* should be 0 */ bool unknown1:1; bool present:1; /* should be 1 */ @@ -131,14
+146,14 @@ struct tb_eeprom_ctl { struct tb_cap_plug_events { struct tb_cap_extended_short cap_header; - u32 __unknown1:2; - u32 plug_events:5; - u32 __unknown2:25; - u32 __unknown3; - u32 __unknown4; + u32 __unknown1:2; /* VSC_CS_1 */ + u32 plug_events:5; /* VSC_CS_1 */ + u32 __unknown2:25; /* VSC_CS_1 */ + u32 vsc_cs_2; + u32 vsc_cs_3; struct tb_eeprom_ctl eeprom_ctl; - u32 __unknown5[7]; - u32 drom_offset; /* 32 bit register, but eeprom addresses are 16 bit */ + u32 __unknown5[7]; /* VSC_CS_5 -> VSC_CS_11 */ + u32 drom_offset; /* VSC_CS_12: 32 bit register, but eeprom addresses are 16 bit */ } __packed; /* device headers */ @@ -178,6 +193,9 @@ struct tb_regs_switch_header { #define ROUTER_CS_4 0x04 #define ROUTER_CS_5 0x05 #define ROUTER_CS_5_SLP BIT(0) +#define ROUTER_CS_5_WOP BIT(1) +#define ROUTER_CS_5_WOU BIT(2) +#define ROUTER_CS_5_WOD BIT(3) #define ROUTER_CS_5_C3S BIT(23) #define ROUTER_CS_5_PTO BIT(24) #define ROUTER_CS_5_UTO BIT(25) @@ -186,19 +204,37 @@ struct tb_regs_switch_header { #define ROUTER_CS_6 0x06 #define ROUTER_CS_6_SLPR BIT(0) #define ROUTER_CS_6_TNS BIT(1) +#define ROUTER_CS_6_WOPS BIT(2) +#define ROUTER_CS_6_WOUS BIT(3) #define ROUTER_CS_6_HCI BIT(18) #define ROUTER_CS_6_CR BIT(25) #define ROUTER_CS_7 0x07 #define ROUTER_CS_9 0x09 #define ROUTER_CS_25 0x19 #define ROUTER_CS_26 0x1a +#define ROUTER_CS_26_OPCODE_MASK GENMASK(15, 0) #define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24) #define ROUTER_CS_26_STATUS_SHIFT 24 #define ROUTER_CS_26_ONS BIT(30) #define ROUTER_CS_26_OV BIT(31) +/* USB4 router operations opcodes */ +enum usb4_switch_op { + USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10, + USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11, + USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12, + USB4_SWITCH_OP_NVM_WRITE = 0x20, + USB4_SWITCH_OP_NVM_AUTH = 0x21, + USB4_SWITCH_OP_NVM_READ = 0x22, + USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23, + USB4_SWITCH_OP_DROM_READ = 0x24, + USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25, + USB4_SWITCH_OP_BUFFER_ALLOC = 0x33, +}; + /* Router TMU configuration */ #define TMU_RTR_CS_0 0x00 +#define TMU_RTR_CS_0_FREQ_WIND_MASK GENMASK(26, 16) #define TMU_RTR_CS_0_TD BIT(27) #define TMU_RTR_CS_0_UCAP BIT(30) #define TMU_RTR_CS_1 0x01 @@ -209,8 +245,14 @@ struct tb_regs_switch_header { #define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0) #define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16) #define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16 +#define TMU_RTR_CS_15 0xf +#define TMU_RTR_CS_15_FREQ_AVG_MASK GENMASK(5, 0) +#define TMU_RTR_CS_15_DELAY_AVG_MASK GENMASK(11, 6) +#define TMU_RTR_CS_15_OFFSET_AVG_MASK GENMASK(17, 12) +#define TMU_RTR_CS_15_ERROR_AVG_MASK GENMASK(23, 18) #define TMU_RTR_CS_22 0x16 #define TMU_RTR_CS_24 0x18 +#define TMU_RTR_CS_25 0x19 enum tb_port_type { TB_TYPE_INACTIVE = 0x000000, @@ -234,7 +276,8 @@ struct tb_regs_port_header { /* DWORD 1 */ u32 first_cap_offset:8; u32 max_counters:11; - u32 __unknown1:5; + u32 counters_support:1; + u32 __unknown1:4; u32 revision:8; /* DWORD 2 */ enum tb_port_type type:24; @@ -265,20 +308,35 @@ struct tb_regs_port_header { #define ADP_CS_5 0x05 #define ADP_CS_5_LCA_MASK GENMASK(28, 22) #define ADP_CS_5_LCA_SHIFT 22 +#define ADP_CS_5_DHP BIT(31) /* TMU adapter registers */ #define TMU_ADP_CS_3 0x03 #define TMU_ADP_CS_3_UDM BIT(29) +#define TMU_ADP_CS_6 0x06 +#define TMU_ADP_CS_6_DTS BIT(1) /* Lane adapter registers */ #define LANE_ADP_CS_0 0x00 +#define LANE_ADP_CS_0_SUPPORTED_SPEED_MASK GENMASK(19, 16) +#define LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT 16 #define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20) 
#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20 +#define LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL 0x2 +#define LANE_ADP_CS_0_CL0S_SUPPORT BIT(26) +#define LANE_ADP_CS_0_CL1_SUPPORT BIT(27) +#define LANE_ADP_CS_0_CL2_SUPPORT BIT(28) #define LANE_ADP_CS_1 0x01 +#define LANE_ADP_CS_1_TARGET_SPEED_MASK GENMASK(3, 0) +#define LANE_ADP_CS_1_TARGET_SPEED_GEN3 0xc #define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4) #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4 #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1 #define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3 +#define LANE_ADP_CS_1_CL0S_ENABLE BIT(10) +#define LANE_ADP_CS_1_CL1_ENABLE BIT(11) +#define LANE_ADP_CS_1_CL2_ENABLE BIT(12) +#define LANE_ADP_CS_1_LD BIT(14) #define LANE_ADP_CS_1_LB BIT(15) #define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16) #define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16 @@ -286,12 +344,30 @@ struct tb_regs_port_header { #define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8 #define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4 #define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20) #define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20 +#define LANE_ADP_CS_1_PMS BIT(30) /* USB4 port registers */ +#define PORT_CS_1 0x01 +#define PORT_CS_1_LENGTH_SHIFT 8 +#define PORT_CS_1_TARGET_MASK GENMASK(18, 16) +#define PORT_CS_1_TARGET_SHIFT 16 +#define PORT_CS_1_RETIMER_INDEX_SHIFT 20 +#define PORT_CS_1_WNR_WRITE BIT(24) +#define PORT_CS_1_NR BIT(25) +#define PORT_CS_1_RC BIT(26) +#define PORT_CS_1_PND BIT(31) +#define PORT_CS_2 0x02 #define PORT_CS_18 0x12 #define PORT_CS_18_BE BIT(8) +#define PORT_CS_18_TCM BIT(9) +#define PORT_CS_18_CPS BIT(10) +#define PORT_CS_18_WOU4S BIT(18) #define PORT_CS_19 0x13 #define PORT_CS_19_PC BIT(3) +#define PORT_CS_19_PID BIT(4) +#define PORT_CS_19_WOC BIT(16) +#define PORT_CS_19_WOD BIT(17) +#define PORT_CS_19_WOU4 BIT(18) /* Display Port adapter registers */ #define ADP_DP_CS_0 0x00 @@ -327,6 +403,7 @@ struct tb_regs_port_header { #define DP_COMMON_CAP_1_LANE 0x0 #define DP_COMMON_CAP_2_LANES 0x1 #define DP_COMMON_CAP_4_LANES 0x2 +#define DP_COMMON_CAP_LTTPR_NS BIT(27) #define DP_COMMON_CAP_DPRX_DONE BIT(31) /* PCIe adapter registers */ @@ -337,6 +414,25 @@ struct tb_regs_port_header { #define ADP_USB3_CS_0 0x00 #define ADP_USB3_CS_0_V BIT(30) #define ADP_USB3_CS_0_PE BIT(31) +#define ADP_USB3_CS_1 0x01 +#define ADP_USB3_CS_1_CUBW_MASK GENMASK(11, 0) +#define ADP_USB3_CS_1_CDBW_MASK GENMASK(23, 12) +#define ADP_USB3_CS_1_CDBW_SHIFT 12 +#define ADP_USB3_CS_1_HCA BIT(31) +#define ADP_USB3_CS_2 0x02 +#define ADP_USB3_CS_2_AUBW_MASK GENMASK(11, 0) +#define ADP_USB3_CS_2_ADBW_MASK GENMASK(23, 12) +#define ADP_USB3_CS_2_ADBW_SHIFT 12 +#define ADP_USB3_CS_2_CMR BIT(31) +#define ADP_USB3_CS_3 0x03 +#define ADP_USB3_CS_3_SCALE_MASK GENMASK(5, 0) +#define ADP_USB3_CS_4 0x04 +#define ADP_USB3_CS_4_ALR_MASK GENMASK(6, 0) +#define ADP_USB3_CS_4_ALR_20G 0x1 +#define ADP_USB3_CS_4_ULV BIT(7) +#define ADP_USB3_CS_4_MSLR_MASK GENMASK(18, 12) +#define ADP_USB3_CS_4_MSLR_SHIFT 12 +#define ADP_USB3_CS_4_MSLR_20G 0x1 /* Hop register from TB_CFG_HOPS. 8 byte per entry. */ struct tb_regs_hop { @@ -365,29 +461,91 @@ struct tb_regs_hop { u32 unknown3:3; /* set to zero */ } __packed; +/* TMU Thunderbolt 3 registers */ +#define TB_TIME_VSEC_3_CS_9 0x9 +#define TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK GENMASK(17, 16) +#define TB_TIME_VSEC_3_CS_26 0x1a +#define TB_TIME_VSEC_3_CS_26_TD BIT(22) + +/* + * Used for Titan Ridge only. These bits are part of the same register, + * TMU_ADP_CS_6 (see above), as in the USB4 spec, but they are specific + * to Titan Ridge and reserved in the USB4 spec.
+ */ +#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK GENMASK(3, 2) +#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL1 BIT(2) +#define TMU_ADP_CS_6_DISABLE_TMU_OBJ_CL2 BIT(3) + +/* Plug Events registers */ +#define TB_PLUG_EVENTS_USB_DISABLE BIT(2) +#define TB_PLUG_EVENTS_CS_1_LANE_DISABLE BIT(3) +#define TB_PLUG_EVENTS_CS_1_DPOUT_DISABLE BIT(4) +#define TB_PLUG_EVENTS_CS_1_LOW_DPIN_DISABLE BIT(5) +#define TB_PLUG_EVENTS_CS_1_HIGH_DPIN_DISABLE BIT(6) + +#define TB_PLUG_EVENTS_PCIE_WR_DATA 0x1b +#define TB_PLUG_EVENTS_PCIE_CMD 0x1c +#define TB_PLUG_EVENTS_PCIE_CMD_DW_OFFSET_MASK GENMASK(9, 0) +#define TB_PLUG_EVENTS_PCIE_CMD_BR_SHIFT 10 +#define TB_PLUG_EVENTS_PCIE_CMD_BR_MASK GENMASK(17, 10) +#define TB_PLUG_EVENTS_PCIE_CMD_RD_WR_MASK BIT(21) +#define TB_PLUG_EVENTS_PCIE_CMD_WR 0x1 +#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_SHIFT 22 +#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_MASK GENMASK(24, 22) +#define TB_PLUG_EVENTS_PCIE_CMD_COMMAND_VAL 0x2 +#define TB_PLUG_EVENTS_PCIE_CMD_REQ_ACK_MASK BIT(30) +#define TB_PLUG_EVENTS_PCIE_CMD_TIMEOUT_MASK BIT(31) +#define TB_PLUG_EVENTS_PCIE_CMD_RD_DATA 0x1d + +/* CP Low Power registers */ +#define TB_LOW_PWR_C1_CL1 0x1 +#define TB_LOW_PWR_C1_CL1_OBJ_MASK GENMASK(4, 1) +#define TB_LOW_PWR_C1_CL2_OBJ_MASK GENMASK(4, 1) +#define TB_LOW_PWR_C1_PORT_A_MASK GENMASK(2, 1) +#define TB_LOW_PWR_C0_PORT_B_MASK GENMASK(4, 3) +#define TB_LOW_PWR_C3_CL1 0x3 + /* Common link controller registers */ -#define TB_LC_DESC 0x02 -#define TB_LC_DESC_NLC_MASK GENMASK(3, 0) -#define TB_LC_DESC_SIZE_SHIFT 8 -#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8) -#define TB_LC_DESC_PORT_SIZE_SHIFT 16 -#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16) -#define TB_LC_FUSE 0x03 -#define TB_LC_SNK_ALLOCATION 0x10 -#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0) -#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1 -#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4 -#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4) -#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1 +#define TB_LC_DESC 0x02 +#define TB_LC_DESC_NLC_MASK GENMASK(3, 0) +#define TB_LC_DESC_SIZE_SHIFT 8 +#define TB_LC_DESC_SIZE_MASK GENMASK(15, 8) +#define TB_LC_DESC_PORT_SIZE_SHIFT 16 +#define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16) +#define TB_LC_FUSE 0x03 +#define TB_LC_SNK_ALLOCATION 0x10 +#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0) +#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1 +#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4 +#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4) +#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1 +#define TB_LC_POWER 0x740 /* Link controller registers */ -#define TB_LC_PORT_ATTR 0x8d -#define TB_LC_PORT_ATTR_BE BIT(12) - -#define TB_LC_SX_CTRL 0x96 -#define TB_LC_SX_CTRL_L1C BIT(16) -#define TB_LC_SX_CTRL_L2C BIT(20) -#define TB_LC_SX_CTRL_UPSTREAM BIT(30) -#define TB_LC_SX_CTRL_SLP BIT(31) +#define TB_LC_CS_42 0x2a +#define TB_LC_CS_42_USB_PLUGGED BIT(31) + +#define TB_LC_PORT_ATTR 0x8d +#define TB_LC_PORT_ATTR_BE BIT(12) + +#define TB_LC_SX_CTRL 0x96 +#define TB_LC_SX_CTRL_WOC BIT(1) +#define TB_LC_SX_CTRL_WOD BIT(2) +#define TB_LC_SX_CTRL_WODPC BIT(3) +#define TB_LC_SX_CTRL_WODPD BIT(4) +#define TB_LC_SX_CTRL_WOU4 BIT(5) +#define TB_LC_SX_CTRL_WOP BIT(6) +#define TB_LC_SX_CTRL_L1C BIT(16) +#define TB_LC_SX_CTRL_L1D BIT(17) +#define TB_LC_SX_CTRL_L2C BIT(20) +#define TB_LC_SX_CTRL_L2D BIT(21) +#define TB_LC_SX_CTRL_SLI BIT(29) +#define TB_LC_SX_CTRL_UPSTREAM BIT(30) +#define TB_LC_SX_CTRL_SLP BIT(31) +#define TB_LC_LINK_ATTR 0x97 +#define TB_LC_LINK_ATTR_CPS BIT(18) + +#define TB_LC_LINK_REQ 0xad +#define TB_LC_LINK_REQ_XHCI_CONNECT BIT(31) 
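+
+/*
+ * Editorial sketch, not part of the original patch: the MASK/SHIFT
+ * pairs above are meant to be consumed together. A minimal example of
+ * decoding the current link speed from a LANE_ADP_CS_1 register value
+ * (the helper name is hypothetical, used here only for illustration):
+ */
+static inline unsigned int lane_adp_cs_1_current_speed(u32 val)
+{
+	/* 0x8 means Gen2 and 0x4 means Gen3, per the definitions above */
+	return (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >>
+	       LANE_ADP_CS_1_CURRENT_SPEED_SHIFT;
+}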
#endif diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c new file mode 100644 index 000000000000..24c06e7354cd --- /dev/null +++ b/drivers/thunderbolt/test.c @@ -0,0 +1,2820 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KUnit tests + * + * Copyright (C) 2020, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <kunit/test.h> +#include <linux/idr.h> + +#include "tb.h" +#include "tunnel.h" + +static int __ida_init(struct kunit_resource *res, void *context) +{ + struct ida *ida = context; + + ida_init(ida); + res->data = ida; + return 0; +} + +static void __ida_destroy(struct kunit_resource *res) +{ + struct ida *ida = res->data; + + ida_destroy(ida); +} + +static void kunit_ida_init(struct kunit *test, struct ida *ida) +{ + kunit_alloc_resource(test, __ida_init, __ida_destroy, GFP_KERNEL, ida); +} + +static struct tb_switch *alloc_switch(struct kunit *test, u64 route, + u8 upstream_port, u8 max_port_number) +{ + struct tb_switch *sw; + size_t size; + int i; + + sw = kunit_kzalloc(test, sizeof(*sw), GFP_KERNEL); + if (!sw) + return NULL; + + sw->config.upstream_port_number = upstream_port; + sw->config.depth = tb_route_length(route); + sw->config.route_hi = upper_32_bits(route); + sw->config.route_lo = lower_32_bits(route); + sw->config.enabled = 0; + sw->config.max_port_number = max_port_number; + + size = (sw->config.max_port_number + 1) * sizeof(*sw->ports); + sw->ports = kunit_kzalloc(test, size, GFP_KERNEL); + if (!sw->ports) + return NULL; + + for (i = 0; i <= sw->config.max_port_number; i++) { + sw->ports[i].sw = sw; + sw->ports[i].port = i; + sw->ports[i].config.port_number = i; + if (i) { + kunit_ida_init(test, &sw->ports[i].in_hopids); + kunit_ida_init(test, &sw->ports[i].out_hopids); + } + } + + return sw; +} + +static struct tb_switch *alloc_host(struct kunit *test) +{ + struct tb_switch *sw; + + sw = alloc_switch(test, 0, 7, 13); + if (!sw) + return NULL; + + sw->config.vendor_id = 0x8086; + sw->config.device_id = 0x9a1b; + + sw->ports[0].config.type = TB_TYPE_PORT; + sw->ports[0].config.max_in_hop_id = 7; + sw->ports[0].config.max_out_hop_id = 7; + + sw->ports[1].config.type = TB_TYPE_PORT; + sw->ports[1].config.max_in_hop_id = 19; + sw->ports[1].config.max_out_hop_id = 19; + sw->ports[1].total_credits = 60; + sw->ports[1].ctl_credits = 2; + sw->ports[1].dual_link_port = &sw->ports[2]; + + sw->ports[2].config.type = TB_TYPE_PORT; + sw->ports[2].config.max_in_hop_id = 19; + sw->ports[2].config.max_out_hop_id = 19; + sw->ports[2].total_credits = 60; + sw->ports[2].ctl_credits = 2; + sw->ports[2].dual_link_port = &sw->ports[1]; + sw->ports[2].link_nr = 1; + + sw->ports[3].config.type = TB_TYPE_PORT; + sw->ports[3].config.max_in_hop_id = 19; + sw->ports[3].config.max_out_hop_id = 19; + sw->ports[3].total_credits = 60; + sw->ports[3].ctl_credits = 2; + sw->ports[3].dual_link_port = &sw->ports[4]; + + sw->ports[4].config.type = TB_TYPE_PORT; + sw->ports[4].config.max_in_hop_id = 19; + sw->ports[4].config.max_out_hop_id = 19; + sw->ports[4].total_credits = 60; + sw->ports[4].ctl_credits = 2; + sw->ports[4].dual_link_port = &sw->ports[3]; + sw->ports[4].link_nr = 1; + + sw->ports[5].config.type = TB_TYPE_DP_HDMI_IN; + sw->ports[5].config.max_in_hop_id = 9; + sw->ports[5].config.max_out_hop_id = 9; + sw->ports[5].cap_adap = -1; + + sw->ports[6].config.type = TB_TYPE_DP_HDMI_IN; + sw->ports[6].config.max_in_hop_id = 9; + sw->ports[6].config.max_out_hop_id = 9; + sw->ports[6].cap_adap = -1; + + sw->ports[7].config.type = 
TB_TYPE_NHI; + sw->ports[7].config.max_in_hop_id = 11; + sw->ports[7].config.max_out_hop_id = 11; + sw->ports[7].config.nfc_credits = 0x41800000; + + sw->ports[8].config.type = TB_TYPE_PCIE_DOWN; + sw->ports[8].config.max_in_hop_id = 8; + sw->ports[8].config.max_out_hop_id = 8; + + sw->ports[9].config.type = TB_TYPE_PCIE_DOWN; + sw->ports[9].config.max_in_hop_id = 8; + sw->ports[9].config.max_out_hop_id = 8; + + sw->ports[10].disabled = true; + sw->ports[11].disabled = true; + + sw->ports[12].config.type = TB_TYPE_USB3_DOWN; + sw->ports[12].config.max_in_hop_id = 8; + sw->ports[12].config.max_out_hop_id = 8; + + sw->ports[13].config.type = TB_TYPE_USB3_DOWN; + sw->ports[13].config.max_in_hop_id = 8; + sw->ports[13].config.max_out_hop_id = 8; + + return sw; +} + +static struct tb_switch *alloc_host_usb4(struct kunit *test) +{ + struct tb_switch *sw; + + sw = alloc_host(test); + if (!sw) + return NULL; + + sw->generation = 4; + sw->credit_allocation = true; + sw->max_usb3_credits = 32; + sw->min_dp_aux_credits = 1; + sw->min_dp_main_credits = 0; + sw->max_pcie_credits = 64; + sw->max_dma_credits = 14; + + return sw; +} + +static struct tb_switch *alloc_dev_default(struct kunit *test, + struct tb_switch *parent, + u64 route, bool bonded) +{ + struct tb_port *port, *upstream_port; + struct tb_switch *sw; + + sw = alloc_switch(test, route, 1, 19); + if (!sw) + return NULL; + + sw->config.vendor_id = 0x8086; + sw->config.device_id = 0x15ef; + + sw->ports[0].config.type = TB_TYPE_PORT; + sw->ports[0].config.max_in_hop_id = 8; + sw->ports[0].config.max_out_hop_id = 8; + + sw->ports[1].config.type = TB_TYPE_PORT; + sw->ports[1].config.max_in_hop_id = 19; + sw->ports[1].config.max_out_hop_id = 19; + sw->ports[1].total_credits = 60; + sw->ports[1].ctl_credits = 2; + sw->ports[1].dual_link_port = &sw->ports[2]; + + sw->ports[2].config.type = TB_TYPE_PORT; + sw->ports[2].config.max_in_hop_id = 19; + sw->ports[2].config.max_out_hop_id = 19; + sw->ports[2].total_credits = 60; + sw->ports[2].ctl_credits = 2; + sw->ports[2].dual_link_port = &sw->ports[1]; + sw->ports[2].link_nr = 1; + + sw->ports[3].config.type = TB_TYPE_PORT; + sw->ports[3].config.max_in_hop_id = 19; + sw->ports[3].config.max_out_hop_id = 19; + sw->ports[3].total_credits = 60; + sw->ports[3].ctl_credits = 2; + sw->ports[3].dual_link_port = &sw->ports[4]; + + sw->ports[4].config.type = TB_TYPE_PORT; + sw->ports[4].config.max_in_hop_id = 19; + sw->ports[4].config.max_out_hop_id = 19; + sw->ports[4].total_credits = 60; + sw->ports[4].ctl_credits = 2; + sw->ports[4].dual_link_port = &sw->ports[3]; + sw->ports[4].link_nr = 1; + + sw->ports[5].config.type = TB_TYPE_PORT; + sw->ports[5].config.max_in_hop_id = 19; + sw->ports[5].config.max_out_hop_id = 19; + sw->ports[5].total_credits = 60; + sw->ports[5].ctl_credits = 2; + sw->ports[5].dual_link_port = &sw->ports[6]; + + sw->ports[6].config.type = TB_TYPE_PORT; + sw->ports[6].config.max_in_hop_id = 19; + sw->ports[6].config.max_out_hop_id = 19; + sw->ports[6].total_credits = 60; + sw->ports[6].ctl_credits = 2; + sw->ports[6].dual_link_port = &sw->ports[5]; + sw->ports[6].link_nr = 1; + + sw->ports[7].config.type = TB_TYPE_PORT; + sw->ports[7].config.max_in_hop_id = 19; + sw->ports[7].config.max_out_hop_id = 19; + sw->ports[7].total_credits = 60; + sw->ports[7].ctl_credits = 2; + sw->ports[7].dual_link_port = &sw->ports[8]; + + sw->ports[8].config.type = TB_TYPE_PORT; + sw->ports[8].config.max_in_hop_id = 19; + sw->ports[8].config.max_out_hop_id = 19; + sw->ports[8].total_credits = 60; + 
sw->ports[8].ctl_credits = 2; + sw->ports[8].dual_link_port = &sw->ports[7]; + sw->ports[8].link_nr = 1; + + sw->ports[9].config.type = TB_TYPE_PCIE_UP; + sw->ports[9].config.max_in_hop_id = 8; + sw->ports[9].config.max_out_hop_id = 8; + + sw->ports[10].config.type = TB_TYPE_PCIE_DOWN; + sw->ports[10].config.max_in_hop_id = 8; + sw->ports[10].config.max_out_hop_id = 8; + + sw->ports[11].config.type = TB_TYPE_PCIE_DOWN; + sw->ports[11].config.max_in_hop_id = 8; + sw->ports[11].config.max_out_hop_id = 8; + + sw->ports[12].config.type = TB_TYPE_PCIE_DOWN; + sw->ports[12].config.max_in_hop_id = 8; + sw->ports[12].config.max_out_hop_id = 8; + + sw->ports[13].config.type = TB_TYPE_DP_HDMI_OUT; + sw->ports[13].config.max_in_hop_id = 9; + sw->ports[13].config.max_out_hop_id = 9; + sw->ports[13].cap_adap = -1; + + sw->ports[14].config.type = TB_TYPE_DP_HDMI_OUT; + sw->ports[14].config.max_in_hop_id = 9; + sw->ports[14].config.max_out_hop_id = 9; + sw->ports[14].cap_adap = -1; + + sw->ports[15].disabled = true; + + sw->ports[16].config.type = TB_TYPE_USB3_UP; + sw->ports[16].config.max_in_hop_id = 8; + sw->ports[16].config.max_out_hop_id = 8; + + sw->ports[17].config.type = TB_TYPE_USB3_DOWN; + sw->ports[17].config.max_in_hop_id = 8; + sw->ports[17].config.max_out_hop_id = 8; + + sw->ports[18].config.type = TB_TYPE_USB3_DOWN; + sw->ports[18].config.max_in_hop_id = 8; + sw->ports[18].config.max_out_hop_id = 8; + + sw->ports[19].config.type = TB_TYPE_USB3_DOWN; + sw->ports[19].config.max_in_hop_id = 8; + sw->ports[19].config.max_out_hop_id = 8; + + if (!parent) + return sw; + + /* Link them */ + upstream_port = tb_upstream_port(sw); + port = tb_port_at(route, parent); + port->remote = upstream_port; + upstream_port->remote = port; + if (port->dual_link_port && upstream_port->dual_link_port) { + port->dual_link_port->remote = upstream_port->dual_link_port; + upstream_port->dual_link_port->remote = port->dual_link_port; + + if (bonded) { + /* Bonding is used */ + port->bonded = true; + port->total_credits *= 2; + port->dual_link_port->bonded = true; + port->dual_link_port->total_credits = 0; + upstream_port->bonded = true; + upstream_port->total_credits *= 2; + upstream_port->dual_link_port->bonded = true; + upstream_port->dual_link_port->total_credits = 0; + } + } + + return sw; +} + +static struct tb_switch *alloc_dev_with_dpin(struct kunit *test, + struct tb_switch *parent, + u64 route, bool bonded) +{ + struct tb_switch *sw; + + sw = alloc_dev_default(test, parent, route, bonded); + if (!sw) + return NULL; + + sw->ports[13].config.type = TB_TYPE_DP_HDMI_IN; + sw->ports[13].config.max_in_hop_id = 9; + sw->ports[13].config.max_out_hop_id = 9; + + sw->ports[14].config.type = TB_TYPE_DP_HDMI_IN; + sw->ports[14].config.max_in_hop_id = 9; + sw->ports[14].config.max_out_hop_id = 9; + + return sw; +} + +static struct tb_switch *alloc_dev_without_dp(struct kunit *test, + struct tb_switch *parent, + u64 route, bool bonded) +{ + struct tb_switch *sw; + int i; + + sw = alloc_dev_default(test, parent, route, bonded); + if (!sw) + return NULL; + /* + * Device with: + * 2x USB4 Adapters (adapters 1,2 and 3,4), + * 1x PCIe Upstream (adapter 9), + * 1x PCIe Downstream (adapter 10), + * 1x USB3 Upstream (adapter 16), + * 1x USB3 Downstream (adapter 17) + */ + for (i = 5; i <= 8; i++) + sw->ports[i].disabled = true; + + for (i = 11; i <= 14; i++) + sw->ports[i].disabled = true; + + sw->ports[13].cap_adap = 0; + sw->ports[14].cap_adap = 0; + + for (i = 18; i <= 19; i++) + sw->ports[i].disabled = true; + + 
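+	/*
+	 * Model a USB4 router that supports credit (buffer) allocation;
+	 * the fields below are the per-protocol limits the connection
+	 * manager uses when distributing this router's credits.
+	 */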
sw->generation = 4; + sw->credit_allocation = true; + sw->max_usb3_credits = 109; + sw->min_dp_aux_credits = 0; + sw->min_dp_main_credits = 0; + sw->max_pcie_credits = 30; + sw->max_dma_credits = 1; + + return sw; +} + +static struct tb_switch *alloc_dev_usb4(struct kunit *test, + struct tb_switch *parent, + u64 route, bool bonded) +{ + struct tb_switch *sw; + + sw = alloc_dev_default(test, parent, route, bonded); + if (!sw) + return NULL; + + sw->generation = 4; + sw->credit_allocation = true; + sw->max_usb3_credits = 14; + sw->min_dp_aux_credits = 1; + sw->min_dp_main_credits = 18; + sw->max_pcie_credits = 32; + sw->max_dma_credits = 14; + + return sw; +} + +static void tb_test_path_basic(struct kunit *test) +{ + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host; + + host = alloc_host(test); + + src_port = &host->ports[5]; + dst_port = src_port; + + p = tb_next_port_on_path(src_port, dst_port, NULL); + KUNIT_EXPECT_PTR_EQ(test, p, dst_port); + + p = tb_next_port_on_path(src_port, dst_port, p); + KUNIT_EXPECT_TRUE(test, !p); +} + +static void tb_test_path_not_connected_walk(struct kunit *test) +{ + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host, *dev; + + host = alloc_host(test); + /* No connection between host and dev */ + dev = alloc_dev_default(test, NULL, 3, true); + + src_port = &host->ports[12]; + dst_port = &dev->ports[16]; + + p = tb_next_port_on_path(src_port, dst_port, NULL); + KUNIT_EXPECT_PTR_EQ(test, p, src_port); + + p = tb_next_port_on_path(src_port, dst_port, p); + KUNIT_EXPECT_PTR_EQ(test, p, &host->ports[3]); + + p = tb_next_port_on_path(src_port, dst_port, p); + KUNIT_EXPECT_TRUE(test, !p); + + /* Other direction */ + + p = tb_next_port_on_path(dst_port, src_port, NULL); + KUNIT_EXPECT_PTR_EQ(test, p, dst_port); + + p = tb_next_port_on_path(dst_port, src_port, p); + KUNIT_EXPECT_PTR_EQ(test, p, &dev->ports[1]); + + p = tb_next_port_on_path(dst_port, src_port, p); + KUNIT_EXPECT_TRUE(test, !p); +} + +struct port_expectation { + u64 route; + u8 port; + enum tb_port_type type; +}; + +static void tb_test_path_single_hop_walk(struct kunit *test) +{ + /* + * Walks from Host PCIe downstream port to Device #1 PCIe + * upstream port. 
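+ *
+ * In the topology diagrams of these tests, the numbers on each side
+ * of a link are the port (adapter) numbers of the parent and child
+ * routers, respectively.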
+ * + * [Host] + * 1 | + * 1 | + * [Device] + */ + static const struct port_expectation test_data[] = { + { .route = 0x0, .port = 8, .type = TB_TYPE_PCIE_DOWN }, + { .route = 0x0, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 9, .type = TB_TYPE_PCIE_UP }, + }; + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host, *dev; + int i; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 1, true); + + src_port = &host->ports[8]; + dst_port = &dev->ports[9]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_daisy_chain_walk(struct kunit *test) +{ + /* + * Walks from Host DP IN to Device #2 DP OUT. + * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 3 / + * 1 / + * [Device #2] + */ + static const struct port_expectation test_data[] = { + { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN }, + { .route = 0x0, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 13, .type = TB_TYPE_DP_HDMI_OUT }, + }; + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host, *dev1, *dev2; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x301, true); + + src_port = &host->ports[5]; + dst_port = &dev2->ports[13]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_simple_tree_walk(struct kunit *test) +{ + /* + * Walks from Host DP IN to Device #3 DP OUT. 
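+ *
+ * Route values such as 0x501 encode one downstream port number per
+ * byte starting from the host, so 0x501 is reached through host
+ * port 1 and then port 5 of Device #1 (cf. tb_downstream_route()).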
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #4] + * | 1 + * [Device #3] + */ + static const struct port_expectation test_data[] = { + { .route = 0x0, .port = 5, .type = TB_TYPE_DP_HDMI_IN }, + { .route = 0x0, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 5, .type = TB_TYPE_PORT }, + { .route = 0x501, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x501, .port = 13, .type = TB_TYPE_DP_HDMI_OUT }, + }; + struct tb_port *src_port, *dst_port, *p; + struct tb_switch *host, *dev1, *dev3; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + alloc_dev_default(test, dev1, 0x301, true); + dev3 = alloc_dev_default(test, dev1, 0x501, true); + alloc_dev_default(test, dev1, 0x701, true); + + src_port = &host->ports[5]; + dst_port = &dev3->ports[13]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_complex_tree_walk(struct kunit *test) +{ + /* + * Walks from Device #3 DP IN to Device #9 DP OUT. 
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #5] + * 5 | | 1 \ 7 + * 1 | [Device #4] \ 1 + * [Device #3] [Device #6] + * 3 / + * 1 / + * [Device #7] + * 3 / | 5 + * 1 / | + * [Device #8] | 1 + * [Device #9] + */ + static const struct port_expectation test_data[] = { + { .route = 0x50301, .port = 13, .type = TB_TYPE_DP_HDMI_IN }, + { .route = 0x50301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 5, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 7, .type = TB_TYPE_PORT }, + { .route = 0x701, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x701, .port = 7, .type = TB_TYPE_PORT }, + { .route = 0x70701, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x70701, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x3070701, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x3070701, .port = 5, .type = TB_TYPE_PORT }, + { .route = 0x503070701, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x503070701, .port = 14, .type = TB_TYPE_DP_HDMI_OUT }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3, *dev5, *dev6, *dev7, *dev9; + struct tb_port *src_port, *dst_port, *p; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x301, true); + dev3 = alloc_dev_with_dpin(test, dev2, 0x50301, true); + alloc_dev_default(test, dev1, 0x501, true); + dev5 = alloc_dev_default(test, dev1, 0x701, true); + dev6 = alloc_dev_default(test, dev5, 0x70701, true); + dev7 = alloc_dev_default(test, dev6, 0x3070701, true); + alloc_dev_default(test, dev7, 0x303070701, true); + dev9 = alloc_dev_default(test, dev7, 0x503070701, true); + + src_port = &dev3->ports[13]; + dst_port = &dev9->ports[14]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_max_length_walk(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6; + struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12; + struct tb_port *src_port, *dst_port, *p; + int i; + + /* + * Walks from Device #6 DP IN to Device #12 DP OUT. 
+ * + * [Host] + * 1 / \ 3 + * 1 / \ 1 + * [Device #1] [Device #7] + * 3 | | 3 + * 1 | | 1 + * [Device #2] [Device #8] + * 3 | | 3 + * 1 | | 1 + * [Device #3] [Device #9] + * 3 | | 3 + * 1 | | 1 + * [Device #4] [Device #10] + * 3 | | 3 + * 1 | | 1 + * [Device #5] [Device #11] + * 3 | | 3 + * 1 | | 1 + * [Device #6] [Device #12] + */ + static const struct port_expectation test_data[] = { + { .route = 0x30303030301, .port = 13, .type = TB_TYPE_DP_HDMI_IN }, + { .route = 0x30303030301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x303030301, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x303030301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x3030301, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x3030301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x30301, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x30301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x301, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x1, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x0, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x0, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x3, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x3, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x303, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x30303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x30303, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x3030303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x3030303, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x303030303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x303030303, .port = 3, .type = TB_TYPE_PORT }, + { .route = 0x30303030303, .port = 1, .type = TB_TYPE_PORT }, + { .route = 0x30303030303, .port = 13, .type = TB_TYPE_DP_HDMI_OUT }, + }; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x301, true); + dev3 = alloc_dev_default(test, dev2, 0x30301, true); + dev4 = alloc_dev_default(test, dev3, 0x3030301, true); + dev5 = alloc_dev_default(test, dev4, 0x303030301, true); + dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true); + dev7 = alloc_dev_default(test, host, 0x3, true); + dev8 = alloc_dev_default(test, dev7, 0x303, true); + dev9 = alloc_dev_default(test, dev8, 0x30303, true); + dev10 = alloc_dev_default(test, dev9, 0x3030303, true); + dev11 = alloc_dev_default(test, dev10, 0x303030303, true); + dev12 = alloc_dev_default(test, dev11, 0x30303030303, true); + + src_port = &dev6->ports[13]; + dst_port = &dev12->ports[13]; + + /* Walk both directions */ + + i = 0; + tb_for_each_port_on_path(src_port, dst_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i++; + } + + KUNIT_EXPECT_EQ(test, i, ARRAY_SIZE(test_data)); + + i = ARRAY_SIZE(test_data) - 1; + tb_for_each_port_on_path(dst_port, src_port, p) { + KUNIT_EXPECT_TRUE(test, i < ARRAY_SIZE(test_data)); + KUNIT_EXPECT_EQ(test, tb_route(p->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, p->port, test_data[i].port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)p->config.type, + test_data[i].type); + i--; + } + + KUNIT_EXPECT_EQ(test, i, -1); +} + +static void tb_test_path_not_connected(struct kunit *test) +{ + struct 
tb_switch *host, *dev1, *dev2; + struct tb_port *down, *up; + struct tb_path *path; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x3, false); + /* Not connected to anything */ + dev2 = alloc_dev_default(test, NULL, 0x303, false); + + down = &dev1->ports[10]; + up = &dev2->ports[9]; + + path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down"); + KUNIT_ASSERT_NULL(test, path); + path = tb_path_alloc(NULL, down, 8, up, 8, 1, "PCIe Down"); + KUNIT_ASSERT_NULL(test, path); +} + +struct hop_expectation { + u64 route; + u8 in_port; + enum tb_port_type in_type; + u8 out_port; + enum tb_port_type out_type; +}; + +static void tb_test_path_not_bonded_lane0(struct kunit *test) +{ + /* + * PCIe path from host to device using lane 0. + * + * [Host] + * 3 |: 4 + * 1 |: 2 + * [Device] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x0, + .in_port = 9, + .in_type = TB_TYPE_PCIE_DOWN, + .out_port = 3, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x3, + .in_port = 1, + .in_type = TB_TYPE_PORT, + .out_port = 9, + .out_type = TB_TYPE_PCIE_UP, + }, + }; + struct tb_switch *host, *dev; + struct tb_port *down, *up; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x3, false); + + down = &host->ports[9]; + up = &dev->ports[9]; + + path = tb_path_alloc(NULL, down, 8, up, 8, 0, "PCIe Down"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_not_bonded_lane1(struct kunit *test) +{ + /* + * DP Video path from host to device using lane 1. Paths like + * these are only used with Thunderbolt 1 devices where lane + * bonding is not possible. USB4 specifically does not allow + * paths like this (you either use lane 0 where lane 1 is + * disabled or both lanes are bonded). 
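+ *
+ * In these diagrams ":" marks the unused lane of a non-bonded dual
+ * link, so "1 :| 2" below means the path runs over lane 1 (adapter
+ * 2) while lane 0 (adapter 1) is left unused.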
+ * + * [Host] + * 1 :| 2 + * 1 :| 2 + * [Device] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x0, + .in_port = 5, + .in_type = TB_TYPE_DP_HDMI_IN, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 13, + .out_type = TB_TYPE_DP_HDMI_OUT, + }, + }; + struct tb_switch *host, *dev; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x1, false); + + in = &host->ports[5]; + out = &dev->ports[13]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_not_bonded_lane1_chain(struct kunit *test) +{ + /* + * DP Video path from host to device 3 using lane 1. + * + * [Host] + * 1 :| 2 + * 1 :| 2 + * [Device #1] + * 7 :| 8 + * 1 :| 2 + * [Device #2] + * 5 :| 6 + * 1 :| 2 + * [Device #3] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x0, + .in_port = 5, + .in_type = TB_TYPE_DP_HDMI_IN, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 8, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x701, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 6, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x50701, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 13, + .out_type = TB_TYPE_DP_HDMI_OUT, + }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, false); + dev2 = alloc_dev_default(test, dev1, 0x701, false); + dev3 = alloc_dev_default(test, dev2, 0x50701, false); + + in = &host->ports[5]; + out = &dev3->ports[13]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_not_bonded_lane1_chain_reverse(struct kunit *test) +{ + /* + * DP Video path from device 3 to host using lane 1. 
+ * + * [Host] + * 1 :| 2 + * 1 :| 2 + * [Device #1] + * 7 :| 8 + * 1 :| 2 + * [Device #2] + * 5 :| 6 + * 1 :| 2 + * [Device #3] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x50701, + .in_port = 13, + .in_type = TB_TYPE_DP_HDMI_IN, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x701, + .in_port = 6, + .in_type = TB_TYPE_PORT, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 8, + .in_type = TB_TYPE_PORT, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x0, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 5, + .out_type = TB_TYPE_DP_HDMI_IN, + }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, false); + dev2 = alloc_dev_default(test, dev1, 0x701, false); + dev3 = alloc_dev_with_dpin(test, dev2, 0x50701, false); + + in = &dev3->ports[13]; + out = &host->ports[5]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_mixed_chain(struct kunit *test) +{ + /* + * DP Video path from host to device 4 where first and last link + * is bonded. 
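+ *
+ * Note the transition on the bonded first link: the path enters
+ * Device #1 through adapter 1 but continues toward the non-bonded
+ * second link through the lane 1 adapter 8.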
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 7 :| 8 + * 1 :| 2 + * [Device #2] + * 5 :| 6 + * 1 :| 2 + * [Device #3] + * 3 | + * 1 | + * [Device #4] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x0, + .in_port = 5, + .in_type = TB_TYPE_DP_HDMI_IN, + .out_port = 1, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 1, + .in_type = TB_TYPE_PORT, + .out_port = 8, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x701, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 6, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x50701, + .in_port = 2, + .in_type = TB_TYPE_PORT, + .out_port = 3, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x3050701, + .in_port = 1, + .in_type = TB_TYPE_PORT, + .out_port = 13, + .out_type = TB_TYPE_DP_HDMI_OUT, + }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x701, false); + dev3 = alloc_dev_default(test, dev2, 0x50701, false); + dev4 = alloc_dev_default(test, dev3, 0x3050701, true); + + in = &host->ports[5]; + out = &dev4->ports[13]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_path_mixed_chain_reverse(struct kunit *test) +{ + /* + * DP Video path from device 4 to host where first and last link + * is bonded. 
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 7 :| 8 + * 1 :| 2 + * [Device #2] + * 5 :| 6 + * 1 :| 2 + * [Device #3] + * 3 | + * 1 | + * [Device #4] + */ + static const struct hop_expectation test_data[] = { + { + .route = 0x3050701, + .in_port = 13, + .in_type = TB_TYPE_DP_HDMI_OUT, + .out_port = 1, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x50701, + .in_port = 3, + .in_type = TB_TYPE_PORT, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x701, + .in_port = 6, + .in_type = TB_TYPE_PORT, + .out_port = 2, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x1, + .in_port = 8, + .in_type = TB_TYPE_PORT, + .out_port = 1, + .out_type = TB_TYPE_PORT, + }, + { + .route = 0x0, + .in_port = 1, + .in_type = TB_TYPE_PORT, + .out_port = 5, + .out_type = TB_TYPE_DP_HDMI_IN, + }, + }; + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4; + struct tb_port *in, *out; + struct tb_path *path; + int i; + + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x701, false); + dev3 = alloc_dev_default(test, dev2, 0x50701, false); + dev4 = alloc_dev_default(test, dev3, 0x3050701, true); + + in = &dev4->ports[13]; + out = &host->ports[5]; + + path = tb_path_alloc(NULL, in, 9, out, 9, 1, "Video"); + KUNIT_ASSERT_NOT_NULL(test, path); + KUNIT_ASSERT_EQ(test, path->path_length, ARRAY_SIZE(test_data)); + for (i = 0; i < ARRAY_SIZE(test_data); i++) { + const struct tb_port *in_port, *out_port; + + in_port = path->hops[i].in_port; + out_port = path->hops[i].out_port; + + KUNIT_EXPECT_EQ(test, tb_route(in_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, in_port->port, test_data[i].in_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)in_port->config.type, + test_data[i].in_type); + KUNIT_EXPECT_EQ(test, tb_route(out_port->sw), test_data[i].route); + KUNIT_EXPECT_EQ(test, out_port->port, test_data[i].out_port); + KUNIT_EXPECT_EQ(test, (enum tb_port_type)out_port->config.type, + test_data[i].out_type); + } + tb_path_free(path); +} + +static void tb_test_tunnel_pcie(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2; + struct tb_tunnel *tunnel1, *tunnel2; + struct tb_port *down, *up; + + /* + * Create PCIe tunnel between host and two devices. 
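+ *
+ * A PCIe tunnel consists of two unidirectional paths, one per
+ * direction, which is what the npaths == 2 expectations below
+ * verify.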
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 5 | + * 1 | + * [Device #2] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x501, true); + + down = &host->ports[8]; + up = &dev1->ports[9]; + tunnel1 = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel1); + KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_PCI); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up); + KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2); + KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up); + KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down); + + down = &dev1->ports[10]; + up = &dev2->ports[9]; + tunnel2 = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel2); + KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_PCI); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up); + KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2); + KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up); + KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down); + + tb_tunnel_free(tunnel2); + tb_tunnel_free(tunnel1); +} + +static void tb_test_tunnel_dp(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + /* + * Create DP tunnel between Host and Device + * + * [Host] + * 1 | + * 1 | + * [Device] + */ + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x3, true); + + in = &host->ports[5]; + out = &dev->ports[13]; + + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[1].out_port, in); + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dp_chain(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev4; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + /* + * Create DP tunnel from Host DP IN to Device #4 DP OUT. 
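+ *
+ * As in the previous test, the DP tunnel is expected to consist of
+ * three paths: two from DP IN to DP OUT (Video and AUX TX) and one
+ * back from DP OUT to DP IN (AUX RX).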
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #4] + * | 1 + * [Device #3] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + alloc_dev_default(test, dev1, 0x301, true); + alloc_dev_default(test, dev1, 0x501, true); + dev4 = alloc_dev_default(test, dev1, 0x701, true); + + in = &host->ports[5]; + out = &dev4->ports[14]; + + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[2].out_port, in); + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dp_tree(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2, *dev3, *dev5; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + /* + * Create DP tunnel from Device #2 DP IN to Device #5 DP OUT. + * + * [Host] + * 3 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #4] + * | 1 + * [Device #3] + * | 5 + * | 1 + * [Device #5] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x3, true); + dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true); + dev3 = alloc_dev_default(test, dev1, 0x503, true); + alloc_dev_default(test, dev1, 0x703, true); + dev5 = alloc_dev_default(test, dev3, 0x50503, true); + + in = &dev2->ports[13]; + out = &dev5->ports[13]; + + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 4); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[3].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 4); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[3].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 4); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[3].out_port, in); + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dp_max_length(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5, *dev6; + struct tb_switch *dev7, *dev8, *dev9, *dev10, *dev11, *dev12; + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + /* + * Creates DP tunnel from Device #6 to Device #12. 
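+ *
+ * Both adapters sit six links deep on opposite branches, so every
+ * path must traverse six routers up, the host router and six routers
+ * down: 6 + 1 + 6 = 13 hops, which is the path_length the assertions
+ * below expect, with the host router crossed at hops[6].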
+ * + * [Host] + * 1 / \ 3 + * 1 / \ 1 + * [Device #1] [Device #7] + * 3 | | 3 + * 1 | | 1 + * [Device #2] [Device #8] + * 3 | | 3 + * 1 | | 1 + * [Device #3] [Device #9] + * 3 | | 3 + * 1 | | 1 + * [Device #4] [Device #10] + * 3 | | 3 + * 1 | | 1 + * [Device #5] [Device #11] + * 3 | | 3 + * 1 | | 1 + * [Device #6] [Device #12] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x301, true); + dev3 = alloc_dev_default(test, dev2, 0x30301, true); + dev4 = alloc_dev_default(test, dev3, 0x3030301, true); + dev5 = alloc_dev_default(test, dev4, 0x303030301, true); + dev6 = alloc_dev_with_dpin(test, dev5, 0x30303030301, true); + dev7 = alloc_dev_default(test, host, 0x3, true); + dev8 = alloc_dev_default(test, dev7, 0x303, true); + dev9 = alloc_dev_default(test, dev8, 0x30303, true); + dev10 = alloc_dev_default(test, dev9, 0x3030303, true); + dev11 = alloc_dev_default(test, dev10, 0x303030303, true); + dev12 = alloc_dev_default(test, dev11, 0x30303030303, true); + + in = &dev6->ports[13]; + out = &dev12->ports[13]; + + tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DP); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, out); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 3); + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 13); + /* First hop */ + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, in); + /* Middle */ + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].in_port, + &host->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[6].out_port, + &host->ports[3]); + /* Last */ + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[12].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 13); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, in); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].in_port, + &host->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[6].out_port, + &host->ports[3]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[12].out_port, out); + KUNIT_ASSERT_EQ(test, tunnel->paths[2]->path_length, 13); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[0].in_port, out); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].in_port, + &host->ports[3]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[6].out_port, + &host->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[2]->hops[12].out_port, in); + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_usb3(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2; + struct tb_tunnel *tunnel1, *tunnel2; + struct tb_port *down, *up; + + /* + * Create USB3 tunnel between host and two devices. 
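+ *
+ * Like a PCIe tunnel, a USB3 tunnel is made of exactly two paths,
+ * one per direction: paths[0] runs from the host's downstream
+ * adapter to the device's upstream adapter and paths[1] back again.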
+ * + * [Host] + * 1 | + * 1 | + * [Device #1] + * \ 7 + * \ 1 + * [Device #2] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x701, true); + + down = &host->ports[12]; + up = &dev1->ports[16]; + tunnel1 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel1); + KUNIT_EXPECT_EQ(test, tunnel1->type, TB_TUNNEL_USB3); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->src_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->dst_port, up); + KUNIT_ASSERT_EQ(test, tunnel1->npaths, 2); + KUNIT_ASSERT_EQ(test, tunnel1->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[0].in_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[0]->hops[1].out_port, up); + KUNIT_ASSERT_EQ(test, tunnel1->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[0].in_port, up); + KUNIT_EXPECT_PTR_EQ(test, tunnel1->paths[1]->hops[1].out_port, down); + + down = &dev1->ports[17]; + up = &dev2->ports[16]; + tunnel2 = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, tunnel2); + KUNIT_EXPECT_EQ(test, tunnel2->type, TB_TUNNEL_USB3); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->src_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->dst_port, up); + KUNIT_ASSERT_EQ(test, tunnel2->npaths, 2); + KUNIT_ASSERT_EQ(test, tunnel2->paths[0]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[0].in_port, down); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[0]->hops[1].out_port, up); + KUNIT_ASSERT_EQ(test, tunnel2->paths[1]->path_length, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[0].in_port, up); + KUNIT_EXPECT_PTR_EQ(test, tunnel2->paths[1]->hops[1].out_port, down); + + tb_tunnel_free(tunnel2); + tb_tunnel_free(tunnel1); +} + +static void tb_test_tunnel_port_on_path(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2, *dev3, *dev4, *dev5; + struct tb_port *in, *out, *port; + struct tb_tunnel *dp_tunnel; + + /* + * [Host] + * 3 | + * 1 | + * [Device #1] + * 3 / | 5 \ 7 + * 1 / | \ 1 + * [Device #2] | [Device #4] + * | 1 + * [Device #3] + * | 5 + * | 1 + * [Device #5] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x3, true); + dev2 = alloc_dev_with_dpin(test, dev1, 0x303, true); + dev3 = alloc_dev_default(test, dev1, 0x503, true); + dev4 = alloc_dev_default(test, dev1, 0x703, true); + dev5 = alloc_dev_default(test, dev3, 0x50503, true); + + in = &dev2->ports[13]; + out = &dev5->ports[13]; + + dp_tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, dp_tunnel); + + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, in)); + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, out)); + + port = &host->ports[8]; + KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &host->ports[3]; + KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev1->ports[1]; + KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev1->ports[3]; + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev1->ports[5]; + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev1->ports[7]; + KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev3->ports[1]; + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev5->ports[1]; + KUNIT_EXPECT_TRUE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + port = &dev4->ports[1]; + 
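+ /*
+ * Device #4 hangs off Device #1 port 7, a branch the DP paths
+ * (Device #2 <-> Device #1 <-> Device #3 <-> Device #5) never
+ * enter, so its upstream port must not be reported as being on
+ * the path either.
+ */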
KUNIT_EXPECT_FALSE(test, tb_tunnel_port_on_path(dp_tunnel, port)); + + tb_tunnel_free(dp_tunnel); +} + +static void tb_test_tunnel_dma(struct kunit *test) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_switch *host; + + /* + * Create DMA tunnel from NHI to port 1 and back. + * + * [Host 1] + * 1 ^ In HopID 1 -> Out HopID 8 + * | + * v In HopID 8 -> Out HopID 1 + * ............ Domain border + * | + * [Host 2] + */ + host = alloc_host(test); + nhi = &host->ports[7]; + port = &host->ports[1]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 2); + /* RX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 1); + /* TX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].out_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].next_hop_index, 8); + + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dma_rx(struct kunit *test) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_switch *host; + + /* + * Create DMA RX tunnel from port 1 to NHI. + * + * [Host 1] + * 1 ^ + * | + * | In HopID 15 -> Out HopID 2 + * ............ Domain border + * | + * [Host 2] + */ + host = alloc_host(test); + nhi = &host->ports[7]; + port = &host->ports[1]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 2); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 1); + /* RX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 15); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 2); + + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dma_tx(struct kunit *test) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_switch *host; + + /* + * Create DMA TX tunnel from NHI to port 1. + * + * [Host 1] + * 1 | In HopID 2 -> Out HopID 15 + * | + * v + * ............ 
Domain border + * | + * [Host 2] + */ + host = alloc_host(test); + nhi = &host->ports[7]; + port = &host->ports[1]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 2, -1, -1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 1); + /* TX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 2); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].next_hop_index, 15); + + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dma_chain(struct kunit *test) +{ + struct tb_switch *host, *dev1, *dev2; + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + + /* + * Create DMA tunnel from NHI to Device #2 port 3 and back. + * + * [Host 1] + * 1 ^ In HopID 1 -> Out HopID x + * | + * 1 | In HopID x -> Out HopID 1 + * [Device #1] + * 7 \ + * 1 \ + * [Device #2] + * 3 | In HopID x -> Out HopID 8 + * | + * v In HopID 8 -> Out HopID x + * ............ Domain border + * | + * [Host 2] + */ + host = alloc_host(test); + dev1 = alloc_dev_default(test, host, 0x1, true); + dev2 = alloc_dev_default(test, dev1, 0x701, true); + + nhi = &host->ports[7]; + port = &dev2->ports[3]; + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_EXPECT_EQ(test, tunnel->type, TB_TUNNEL_DMA); + KUNIT_EXPECT_PTR_EQ(test, tunnel->src_port, nhi); + KUNIT_EXPECT_PTR_EQ(test, tunnel->dst_port, port); + KUNIT_ASSERT_EQ(test, tunnel->npaths, 2); + /* RX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[0]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].in_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[0].in_hop_index, 8); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[0].out_port, + &dev2->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].in_port, + &dev1->ports[7]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[1].out_port, + &dev1->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].in_port, + &host->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[0]->hops[2].out_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[0]->hops[2].next_hop_index, 1); + /* TX path */ + KUNIT_ASSERT_EQ(test, tunnel->paths[1]->path_length, 3); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[0].in_port, nhi); + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[0].in_hop_index, 1); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].in_port, + &dev1->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[1].out_port, + &dev1->ports[7]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].in_port, + &dev2->ports[1]); + KUNIT_EXPECT_PTR_EQ(test, tunnel->paths[1]->hops[2].out_port, port); + KUNIT_EXPECT_EQ(test, tunnel->paths[1]->hops[2].next_hop_index, 8); + + tb_tunnel_free(tunnel); +} + +static void tb_test_tunnel_dma_match(struct kunit *test) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_switch *host; + + host = alloc_host(test); + nhi = &host->ports[7]; + port = &host->ports[1]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, 15, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1)); + KUNIT_ASSERT_FALSE(test, 
tb_tunnel_match_dma(tunnel, 8, 1, 15, 1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 8, -1, 8, -1)); + + tb_tunnel_free(tunnel); + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 15, 1, -1, -1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, 1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, 15, -1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, 1, -1, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 1, 15, 1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1)); + + tb_tunnel_free(tunnel); + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, -1, -1, 15, 11); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 11)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, -1)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, 11)); + KUNIT_ASSERT_TRUE(test, tb_tunnel_match_dma(tunnel, -1, -1, -1, -1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 15, 1)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, -1, -1, 10, 11)); + KUNIT_ASSERT_FALSE(test, tb_tunnel_match_dma(tunnel, 15, 11, -1, -1)); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_legacy_not_bonded(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *up, *down; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x1, false); + + down = &host->ports[8]; + up = &dev->ports[9]; + tunnel = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U); + + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 16U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_legacy_bonded(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *up, *down; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host(test); + dev = alloc_dev_default(test, host, 0x1, true); + + down = &host->ports[8]; + up = &dev->ports[9]; + tunnel = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, 
path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_pcie(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *up, *down; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + down = &host->ports[8]; + up = &dev->ports[9]; + tunnel = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_without_dp(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *up, *down; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_without_dp(test, host, 0x1, true); + + /* + * The device has no DP therefore baMinDPmain = baMinDPaux = 0 + * + * Create PCIe path with buffers less than baMaxPCIe. 
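+ *
+ * With no DP buffers reserved, nearly the whole pool is left for
+ * USB3 (baMaxUSB3 = 109), so the PCIe hop on the device's upstream
+ * adapter is capped by what USB3 leaves over (9 credits) rather
+ * than by baMaxPCIe itself, as the calculation below shows.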
+ *
+ * For a device with the following buffer configuration:
+ * baMaxUSB3 = 109
+ * baMinDPaux = 0
+ * baMinDPmain = 0
+ * baMaxPCIe = 30
+ * baMaxHI = 1
+ * Remaining Buffers = Total - (CP + DP) = 120 - (2 + 0) = 118
+ * PCIe Credits = Max(6, Min(baMaxPCIe, Remaining Buffers - baMaxUSB3))
+ * = Max(6, Min(30, 9)) = 9
+ */
+ down = &host->ports[8];
+ up = &dev->ports[9];
+ tunnel = tb_tunnel_alloc_pci(NULL, up, down);
+ KUNIT_ASSERT_NOT_NULL(test, tunnel);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+ /* PCIe downstream path */
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 9U);
+
+ /* PCIe upstream path */
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U);
+
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_credit_alloc_dp(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *in, *out;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_usb4(test, host, 0x1, true);
+
+ in = &host->ports[5];
+ out = &dev->ports[14];
+
+ tunnel = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tunnel);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)3);
+
+ /* Video (main) path */
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U);
+
+ /* AUX TX */
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ /* AUX RX */
+ path = tunnel->paths[2];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U);
+
+ tb_tunnel_free(tunnel);
+}
+
+static void tb_test_credit_alloc_usb3(struct kunit *test)
+{
+ struct tb_switch *host, *dev;
+ struct tb_port *up, *down;
+ struct tb_tunnel *tunnel;
+ struct tb_path *path;
+
+ host = alloc_host_usb4(test);
+ dev = alloc_dev_usb4(test, host, 0x1, true);
+
+ down = &host->ports[12];
+ up = &dev->ports[16];
+ tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0);
+ KUNIT_ASSERT_NOT_NULL(test, tunnel);
+ KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2);
+
+ path = tunnel->paths[0];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U);
+ KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U);
+
+ path = tunnel->paths[1];
+ KUNIT_ASSERT_EQ(test, path->path_length, 2);
+ KUNIT_EXPECT_EQ(test,
path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_dma(struct kunit *test) +{ + struct tb_switch *host, *dev; + struct tb_port *nhi, *port; + struct tb_tunnel *tunnel; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + nhi = &host->ports[7]; + port = &dev->ports[3]; + + tunnel = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel); + KUNIT_ASSERT_EQ(test, tunnel->npaths, (size_t)2); + + /* DMA RX */ + path = tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + /* DMA TX */ + path = tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + tb_tunnel_free(tunnel); +} + +static void tb_test_credit_alloc_dma_multiple(struct kunit *test) +{ + struct tb_tunnel *tunnel1, *tunnel2, *tunnel3; + struct tb_switch *host, *dev; + struct tb_port *nhi, *port; + struct tb_path *path; + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + nhi = &host->ports[7]; + port = &dev->ports[3]; + + /* + * Create three DMA tunnels through the same ports. With the + * default buffers we should be able to create two and the last + * one fails. + * + * For default host we have following buffers for DMA: + * + * 120 - (2 + 2 * (1 + 0) + 32 + 64 + spare) = 20 + * + * For device we have following: + * + * 120 - (2 + 2 * (1 + 18) + 14 + 32 + spare) = 34 + * + * spare = 14 + 1 = 15 + * + * So on host the first tunnel gets 14 and the second gets the + * remaining 1 and then we run out of buffers. 
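+ *
+ * I.e. the first tunnel below is granted the full 14 credits, the
+ * second only the single remaining credit, and the third allocation
+ * must fail until one of the earlier tunnels is freed again.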
+ */ + tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, tunnel1); + KUNIT_ASSERT_EQ(test, tunnel1->npaths, (size_t)2); + + path = tunnel1->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + path = tunnel1->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2); + KUNIT_ASSERT_NOT_NULL(test, tunnel2); + KUNIT_ASSERT_EQ(test, tunnel2->npaths, (size_t)2); + + path = tunnel2->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + path = tunnel2->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3); + KUNIT_ASSERT_NULL(test, tunnel3); + + /* + * Release the first DMA tunnel. That should make 14 buffers + * available for the next tunnel. + */ + tb_tunnel_free(tunnel1); + + tunnel3 = tb_tunnel_alloc_dma(NULL, nhi, port, 10, 3, 10, 3); + KUNIT_ASSERT_NOT_NULL(test, tunnel3); + + path = tunnel3->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + path = tunnel3->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + tb_tunnel_free(tunnel3); + tb_tunnel_free(tunnel2); +} + +static struct tb_tunnel *TB_TEST_PCIE_TUNNEL(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *up, *down; + struct tb_tunnel *pcie_tunnel; + struct tb_path *path; + + down = &host->ports[8]; + up = &dev->ports[9]; + pcie_tunnel = tb_tunnel_alloc_pci(NULL, up, down); + KUNIT_ASSERT_NOT_NULL(test, pcie_tunnel); + KUNIT_ASSERT_EQ(test, pcie_tunnel->npaths, (size_t)2); + + path = pcie_tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + path = pcie_tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 
0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 64U); + + return pcie_tunnel; +} + +static struct tb_tunnel *TB_TEST_DP_TUNNEL1(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *in, *out; + struct tb_tunnel *dp_tunnel1; + struct tb_path *path; + + in = &host->ports[5]; + out = &dev->ports[13]; + dp_tunnel1 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, dp_tunnel1); + KUNIT_ASSERT_EQ(test, dp_tunnel1->npaths, (size_t)3); + + path = dp_tunnel1->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U); + + path = dp_tunnel1->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + path = dp_tunnel1->paths[2]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + return dp_tunnel1; +} + +static struct tb_tunnel *TB_TEST_DP_TUNNEL2(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *in, *out; + struct tb_tunnel *dp_tunnel2; + struct tb_path *path; + + in = &host->ports[6]; + out = &dev->ports[14]; + dp_tunnel2 = tb_tunnel_alloc_dp(NULL, in, out, 1, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, dp_tunnel2); + KUNIT_ASSERT_EQ(test, dp_tunnel2->npaths, (size_t)3); + + path = dp_tunnel2->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 12U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 18U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 0U); + + path = dp_tunnel2->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + path = dp_tunnel2->paths[2]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 1U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + return dp_tunnel2; +} + +static struct tb_tunnel *TB_TEST_USB3_TUNNEL(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *up, *down; + struct tb_tunnel *usb3_tunnel; + struct tb_path *path; + + down = &host->ports[12]; + up = &dev->ports[16]; + usb3_tunnel = tb_tunnel_alloc_usb3(NULL, up, down, 0, 0); + KUNIT_ASSERT_NOT_NULL(test, usb3_tunnel); + KUNIT_ASSERT_EQ(test, usb3_tunnel->npaths, (size_t)2); + + path = usb3_tunnel->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, 
path->hops[1].initial_credits, 14U); + + path = usb3_tunnel->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 7U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 32U); + + return usb3_tunnel; +} + +static struct tb_tunnel *TB_TEST_DMA_TUNNEL1(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *dma_tunnel1; + struct tb_path *path; + + nhi = &host->ports[7]; + port = &dev->ports[3]; + dma_tunnel1 = tb_tunnel_alloc_dma(NULL, nhi, port, 8, 1, 8, 1); + KUNIT_ASSERT_NOT_NULL(test, dma_tunnel1); + KUNIT_ASSERT_EQ(test, dma_tunnel1->npaths, (size_t)2); + + path = dma_tunnel1->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + path = dma_tunnel1->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 14U); + + return dma_tunnel1; +} + +static struct tb_tunnel *TB_TEST_DMA_TUNNEL2(struct kunit *test, + struct tb_switch *host, struct tb_switch *dev) +{ + struct tb_port *nhi, *port; + struct tb_tunnel *dma_tunnel2; + struct tb_path *path; + + nhi = &host->ports[7]; + port = &dev->ports[3]; + dma_tunnel2 = tb_tunnel_alloc_dma(NULL, nhi, port, 9, 2, 9, 2); + KUNIT_ASSERT_NOT_NULL(test, dma_tunnel2); + KUNIT_ASSERT_EQ(test, dma_tunnel2->npaths, (size_t)2); + + path = dma_tunnel2->paths[0]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 14U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + path = dma_tunnel2->paths[1]; + KUNIT_ASSERT_EQ(test, path->path_length, 2); + KUNIT_EXPECT_EQ(test, path->hops[0].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[0].initial_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].nfc_credits, 0U); + KUNIT_EXPECT_EQ(test, path->hops[1].initial_credits, 1U); + + return dma_tunnel2; +} + +static void tb_test_credit_alloc_all(struct kunit *test) +{ + struct tb_tunnel *pcie_tunnel, *dp_tunnel1, *dp_tunnel2, *usb3_tunnel; + struct tb_tunnel *dma_tunnel1, *dma_tunnel2; + struct tb_switch *host, *dev; + + /* + * Create PCIe, 2 x DP, USB 3.x and two DMA tunnels from host to + * device. Expectation is that all these can be established with + * the default credit allocation found in Intel hardware. 
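+ *
+ * The TB_TEST_*_TUNNEL() helpers below repeat the per-tunnel-type
+ * credit expectations of the individual tests above, but this time
+ * all tunnels are allocated simultaneously, so every allocation has
+ * to fit into the same shared buffer budget.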
+ */ + + host = alloc_host_usb4(test); + dev = alloc_dev_usb4(test, host, 0x1, true); + + pcie_tunnel = TB_TEST_PCIE_TUNNEL(test, host, dev); + dp_tunnel1 = TB_TEST_DP_TUNNEL1(test, host, dev); + dp_tunnel2 = TB_TEST_DP_TUNNEL2(test, host, dev); + usb3_tunnel = TB_TEST_USB3_TUNNEL(test, host, dev); + dma_tunnel1 = TB_TEST_DMA_TUNNEL1(test, host, dev); + dma_tunnel2 = TB_TEST_DMA_TUNNEL2(test, host, dev); + + tb_tunnel_free(dma_tunnel2); + tb_tunnel_free(dma_tunnel1); + tb_tunnel_free(usb3_tunnel); + tb_tunnel_free(dp_tunnel2); + tb_tunnel_free(dp_tunnel1); + tb_tunnel_free(pcie_tunnel); +} + +static const u32 root_directory[] = { + 0x55584401, /* "UXD" v1 */ + 0x00000018, /* Root directory length */ + 0x76656e64, /* "vend" */ + 0x6f726964, /* "orid" */ + 0x76000001, /* "v" R 1 */ + 0x00000a27, /* Immediate value, ! Vendor ID */ + 0x76656e64, /* "vend" */ + 0x6f726964, /* "orid" */ + 0x74000003, /* "t" R 3 */ + 0x0000001a, /* Text leaf offset, (“Apple Inc.”) */ + 0x64657669, /* "devi" */ + 0x63656964, /* "ceid" */ + 0x76000001, /* "v" R 1 */ + 0x0000000a, /* Immediate value, ! Device ID */ + 0x64657669, /* "devi" */ + 0x63656964, /* "ceid" */ + 0x74000003, /* "t" R 3 */ + 0x0000001d, /* Text leaf offset, (“Macintosh”) */ + 0x64657669, /* "devi" */ + 0x63657276, /* "cerv" */ + 0x76000001, /* "v" R 1 */ + 0x80000100, /* Immediate value, Device Revision */ + 0x6e657477, /* "netw" */ + 0x6f726b00, /* "ork" */ + 0x44000014, /* "D" R 20 */ + 0x00000021, /* Directory data offset, (Network Directory) */ + 0x4170706c, /* "Appl" */ + 0x6520496e, /* "e In" */ + 0x632e0000, /* "c." ! */ + 0x4d616369, /* "Maci" */ + 0x6e746f73, /* "ntos" */ + 0x68000000, /* "h" */ + 0x00000000, /* padding */ + 0xca8961c6, /* Directory UUID, Network Directory */ + 0x9541ce1c, /* Directory UUID, Network Directory */ + 0x5949b8bd, /* Directory UUID, Network Directory */ + 0x4f5a5f2e, /* Directory UUID, Network Directory */ + 0x70727463, /* "prtc" */ + 0x69640000, /* "id" */ + 0x76000001, /* "v" R 1 */ + 0x00000001, /* Immediate value, Network Protocol ID */ + 0x70727463, /* "prtc" */ + 0x76657273, /* "vers" */ + 0x76000001, /* "v" R 1 */ + 0x00000001, /* Immediate value, Network Protocol Version */ + 0x70727463, /* "prtc" */ + 0x72657673, /* "revs" */ + 0x76000001, /* "v" R 1 */ + 0x00000001, /* Immediate value, Network Protocol Revision */ + 0x70727463, /* "prtc" */ + 0x73746e73, /* "stns" */ + 0x76000001, /* "v" R 1 */ + 0x00000000, /* Immediate value, Network Protocol Settings */ +}; + +static const uuid_t network_dir_uuid = + UUID_INIT(0xc66189ca, 0x1cce, 0x4195, + 0xbd, 0xb8, 0x49, 0x59, 0x2e, 0x5f, 0x5a, 0x4f); + +static void tb_test_property_parse(struct kunit *test) +{ + struct tb_property_dir *dir, *network_dir; + struct tb_property *p; + + dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory)); + KUNIT_ASSERT_NOT_NULL(test, dir); + + p = tb_property_find(dir, "foo", TB_PROPERTY_TYPE_TEXT); + KUNIT_ASSERT_NULL(test, p); + + p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_TEXT); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_STREQ(test, p->value.text, "Apple Inc."); + + p = tb_property_find(dir, "vendorid", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa27); + + p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_TEXT); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_STREQ(test, p->value.text, "Macintosh"); + + p = tb_property_find(dir, "deviceid", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + 
KUNIT_EXPECT_EQ(test, p->value.immediate, 0xa); + + p = tb_property_find(dir, "missing", TB_PROPERTY_TYPE_DIRECTORY); + KUNIT_ASSERT_NULL(test, p); + + p = tb_property_find(dir, "network", TB_PROPERTY_TYPE_DIRECTORY); + KUNIT_ASSERT_NOT_NULL(test, p); + + network_dir = p->value.dir; + KUNIT_EXPECT_TRUE(test, uuid_equal(network_dir->uuid, &network_dir_uuid)); + + p = tb_property_find(network_dir, "prtcid", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1); + + p = tb_property_find(network_dir, "prtcvers", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1); + + p = tb_property_find(network_dir, "prtcrevs", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0x1); + + p = tb_property_find(network_dir, "prtcstns", TB_PROPERTY_TYPE_VALUE); + KUNIT_ASSERT_NOT_NULL(test, p); + KUNIT_EXPECT_EQ(test, p->value.immediate, 0x0); + + p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_VALUE); + KUNIT_EXPECT_TRUE(test, !p); + p = tb_property_find(network_dir, "deviceid", TB_PROPERTY_TYPE_TEXT); + KUNIT_EXPECT_TRUE(test, !p); + + tb_property_free_dir(dir); +} + +static void tb_test_property_format(struct kunit *test) +{ + struct tb_property_dir *dir; + ssize_t block_len; + u32 *block; + int ret, i; + + dir = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory)); + KUNIT_ASSERT_NOT_NULL(test, dir); + + ret = tb_property_format_dir(dir, NULL, 0); + KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory)); + + block_len = ret; + + block = kunit_kzalloc(test, block_len * sizeof(u32), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, block); + + ret = tb_property_format_dir(dir, block, block_len); + KUNIT_EXPECT_EQ(test, ret, 0); + + for (i = 0; i < ARRAY_SIZE(root_directory); i++) + KUNIT_EXPECT_EQ(test, root_directory[i], block[i]); + + tb_property_free_dir(dir); +} + +static void compare_dirs(struct kunit *test, struct tb_property_dir *d1, + struct tb_property_dir *d2) +{ + struct tb_property *p1, *p2, *tmp; + int n1, n2, i; + + if (d1->uuid) { + KUNIT_ASSERT_NOT_NULL(test, d2->uuid); + KUNIT_ASSERT_TRUE(test, uuid_equal(d1->uuid, d2->uuid)); + } else { + KUNIT_ASSERT_NULL(test, d2->uuid); + } + + n1 = 0; + tb_property_for_each(d1, tmp) + n1++; + KUNIT_ASSERT_NE(test, n1, 0); + + n2 = 0; + tb_property_for_each(d2, tmp) + n2++; + KUNIT_ASSERT_NE(test, n2, 0); + + KUNIT_ASSERT_EQ(test, n1, n2); + + p1 = NULL; + p2 = NULL; + for (i = 0; i < n1; i++) { + p1 = tb_property_get_next(d1, p1); + KUNIT_ASSERT_NOT_NULL(test, p1); + p2 = tb_property_get_next(d2, p2); + KUNIT_ASSERT_NOT_NULL(test, p2); + + KUNIT_ASSERT_STREQ(test, &p1->key[0], &p2->key[0]); + KUNIT_ASSERT_EQ(test, p1->type, p2->type); + KUNIT_ASSERT_EQ(test, p1->length, p2->length); + + switch (p1->type) { + case TB_PROPERTY_TYPE_DIRECTORY: + KUNIT_ASSERT_NOT_NULL(test, p1->value.dir); + KUNIT_ASSERT_NOT_NULL(test, p2->value.dir); + compare_dirs(test, p1->value.dir, p2->value.dir); + break; + + case TB_PROPERTY_TYPE_DATA: + KUNIT_ASSERT_NOT_NULL(test, p1->value.data); + KUNIT_ASSERT_NOT_NULL(test, p2->value.data); + KUNIT_ASSERT_TRUE(test, + !memcmp(p1->value.data, p2->value.data, + p1->length * 4) + ); + break; + + case TB_PROPERTY_TYPE_TEXT: + KUNIT_ASSERT_NOT_NULL(test, p1->value.text); + KUNIT_ASSERT_NOT_NULL(test, p2->value.text); + KUNIT_ASSERT_STREQ(test, p1->value.text, p2->value.text); + break; + + case TB_PROPERTY_TYPE_VALUE: + KUNIT_ASSERT_EQ(test, 
p1->value.immediate, + p2->value.immediate); + break; + default: + KUNIT_FAIL(test, "unexpected property type"); + break; + } + } +} + +static void tb_test_property_copy(struct kunit *test) +{ + struct tb_property_dir *src, *dst; + u32 *block; + int ret, i; + + src = tb_property_parse_dir(root_directory, ARRAY_SIZE(root_directory)); + KUNIT_ASSERT_NOT_NULL(test, src); + + dst = tb_property_copy_dir(src); + KUNIT_ASSERT_NOT_NULL(test, dst); + + /* Compare the structures */ + compare_dirs(test, src, dst); + + /* Compare the resulting property block */ + ret = tb_property_format_dir(dst, NULL, 0); + KUNIT_ASSERT_EQ(test, ret, ARRAY_SIZE(root_directory)); + + block = kunit_kzalloc(test, sizeof(root_directory), GFP_KERNEL); + KUNIT_ASSERT_NOT_NULL(test, block); + + ret = tb_property_format_dir(dst, block, ARRAY_SIZE(root_directory)); + KUNIT_EXPECT_TRUE(test, !ret); + + for (i = 0; i < ARRAY_SIZE(root_directory); i++) + KUNIT_EXPECT_EQ(test, root_directory[i], block[i]); + + tb_property_free_dir(dst); + tb_property_free_dir(src); +} + +static struct kunit_case tb_test_cases[] = { + KUNIT_CASE(tb_test_path_basic), + KUNIT_CASE(tb_test_path_not_connected_walk), + KUNIT_CASE(tb_test_path_single_hop_walk), + KUNIT_CASE(tb_test_path_daisy_chain_walk), + KUNIT_CASE(tb_test_path_simple_tree_walk), + KUNIT_CASE(tb_test_path_complex_tree_walk), + KUNIT_CASE(tb_test_path_max_length_walk), + KUNIT_CASE(tb_test_path_not_connected), + KUNIT_CASE(tb_test_path_not_bonded_lane0), + KUNIT_CASE(tb_test_path_not_bonded_lane1), + KUNIT_CASE(tb_test_path_not_bonded_lane1_chain), + KUNIT_CASE(tb_test_path_not_bonded_lane1_chain_reverse), + KUNIT_CASE(tb_test_path_mixed_chain), + KUNIT_CASE(tb_test_path_mixed_chain_reverse), + KUNIT_CASE(tb_test_tunnel_pcie), + KUNIT_CASE(tb_test_tunnel_dp), + KUNIT_CASE(tb_test_tunnel_dp_chain), + KUNIT_CASE(tb_test_tunnel_dp_tree), + KUNIT_CASE(tb_test_tunnel_dp_max_length), + KUNIT_CASE(tb_test_tunnel_port_on_path), + KUNIT_CASE(tb_test_tunnel_usb3), + KUNIT_CASE(tb_test_tunnel_dma), + KUNIT_CASE(tb_test_tunnel_dma_rx), + KUNIT_CASE(tb_test_tunnel_dma_tx), + KUNIT_CASE(tb_test_tunnel_dma_chain), + KUNIT_CASE(tb_test_tunnel_dma_match), + KUNIT_CASE(tb_test_credit_alloc_legacy_not_bonded), + KUNIT_CASE(tb_test_credit_alloc_legacy_bonded), + KUNIT_CASE(tb_test_credit_alloc_pcie), + KUNIT_CASE(tb_test_credit_alloc_without_dp), + KUNIT_CASE(tb_test_credit_alloc_dp), + KUNIT_CASE(tb_test_credit_alloc_usb3), + KUNIT_CASE(tb_test_credit_alloc_dma), + KUNIT_CASE(tb_test_credit_alloc_dma_multiple), + KUNIT_CASE(tb_test_credit_alloc_all), + KUNIT_CASE(tb_test_property_parse), + KUNIT_CASE(tb_test_property_format), + KUNIT_CASE(tb_test_property_copy), + { } +}; + +static struct kunit_suite tb_test_suite = { + .name = "thunderbolt", + .test_cases = tb_test_cases, +}; + +kunit_test_suite(tb_test_suite); diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c index 039c42a06000..626aca3124b1 100644 --- a/drivers/thunderbolt/tmu.c +++ b/drivers/thunderbolt/tmu.c @@ -11,6 +11,55 @@ #include "tb.h" +static int tb_switch_set_tmu_mode_params(struct tb_switch *sw, + enum tb_switch_tmu_rate rate) +{ + u32 freq_meas_wind[2] = { 30, 800 }; + u32 avg_const[2] = { 4, 8 }; + u32 freq, avg, val; + int ret; + + if (rate == TB_SWITCH_TMU_RATE_NORMAL) { + freq = freq_meas_wind[0]; + avg = avg_const[0]; + } else if (rate == TB_SWITCH_TMU_RATE_HIFI) { + freq = freq_meas_wind[1]; + avg = avg_const[1]; + } else { + return 0; + } + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 
1); + if (ret) + return ret; + + val &= ~TMU_RTR_CS_0_FREQ_WIND_MASK; + val |= FIELD_PREP(TMU_RTR_CS_0_FREQ_WIND_MASK, freq); + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_15, 1); + if (ret) + return ret; + + val &= ~TMU_RTR_CS_15_FREQ_AVG_MASK & + ~TMU_RTR_CS_15_DELAY_AVG_MASK & + ~TMU_RTR_CS_15_OFFSET_AVG_MASK & + ~TMU_RTR_CS_15_ERROR_AVG_MASK; + val |= FIELD_PREP(TMU_RTR_CS_15_FREQ_AVG_MASK, avg) | + FIELD_PREP(TMU_RTR_CS_15_DELAY_AVG_MASK, avg) | + FIELD_PREP(TMU_RTR_CS_15_OFFSET_AVG_MASK, avg) | + FIELD_PREP(TMU_RTR_CS_15_ERROR_AVG_MASK, avg); + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_15, 1); +} + static const char *tb_switch_tmu_mode_name(const struct tb_switch *sw) { bool root_switch = !tb_route(sw); @@ -115,6 +164,11 @@ static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port) return tb_port_tmu_set_unidirectional(port, false); } +static inline int tb_port_tmu_unidirectional_enable(struct tb_port *port) +{ + return tb_port_tmu_set_unidirectional(port, true); +} + static bool tb_port_tmu_is_unidirectional(struct tb_port *port) { int ret; @@ -128,23 +182,46 @@ static bool tb_port_tmu_is_unidirectional(struct tb_port *port) return val & TMU_ADP_CS_3_UDM; } +static int tb_port_tmu_time_sync(struct tb_port *port, bool time_sync) +{ + u32 val = time_sync ? TMU_ADP_CS_6_DTS : 0; + + return tb_port_tmu_write(port, TMU_ADP_CS_6, TMU_ADP_CS_6_DTS, val); +} + +static int tb_port_tmu_time_sync_disable(struct tb_port *port) +{ + return tb_port_tmu_time_sync(port, true); +} + +static int tb_port_tmu_time_sync_enable(struct tb_port *port) +{ + return tb_port_tmu_time_sync(port, false); +} + static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set) { + u32 val, offset, bit; int ret; - u32 val; - ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, - sw->tmu.cap + TMU_RTR_CS_0, 1); + if (tb_switch_is_usb4(sw)) { + offset = sw->tmu.cap + TMU_RTR_CS_0; + bit = TMU_RTR_CS_0_TD; + } else { + offset = sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_26; + bit = TB_TIME_VSEC_3_CS_26_TD; + } + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); if (ret) return ret; if (set) - val |= TMU_RTR_CS_0_TD; + val |= bit; else - val &= ~TMU_RTR_CS_0_TD; + val &= ~bit; - return tb_sw_write(sw, &val, TB_CFG_SWITCH, - sw->tmu.cap + TMU_RTR_CS_0, 1); + return tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1); } /** @@ -207,7 +284,8 @@ int tb_switch_tmu_init(struct tb_switch *sw) */ int tb_switch_tmu_post_time(struct tb_switch *sw) { - unsigned int post_local_time_offset, post_time_offset; + unsigned int post_time_high_offset, post_time_high = 0; + unsigned int post_local_time_offset, post_time_offset; struct tb_switch *root_switch = sw->tb->root_switch; u64 hi, mid, lo, local_time, post_time; int i, ret, retries = 100; @@ -247,6 +325,7 @@ int tb_switch_tmu_post_time(struct tb_switch *sw) post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22; post_time_offset = sw->tmu.cap + TMU_RTR_CS_24; + post_time_high_offset = sw->tmu.cap + TMU_RTR_CS_25; /* * Write the Grandmaster time to the Post Local Time registers @@ -258,17 +337,24 @@ int tb_switch_tmu_post_time(struct tb_switch *sw) goto out; /* - * Have the new switch update its local time (by writing 1 to - * the post_time registers) and wait for the completion of the - * same (post_time register becomes 0). This means the time has - * been converged properly. 
+ * Have the new switch update its local time by:
+ * 1) writing 0x1 to the Post Time Low register and 0xffffffff to
+ * the Post Time High register;
+ * 2) writing 0 to the Post Time High register and then waiting
+ * until the post_time register reads back 0.
+ * This means the time has been converged properly.
 */
- post_time = 1;
+ post_time = 0xffffffff00000001ULL;
 ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2);
 if (ret)
 goto out;
+ ret = tb_sw_write(sw, &post_time_high, TB_CFG_SWITCH,
+ post_time_high_offset, 1);
+ if (ret)
+ goto out;
+
 do {
 usleep_range(5, 10);
 ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH,
@@ -297,30 +383,55 @@ out:
 */
 int tb_switch_tmu_disable(struct tb_switch *sw)
 {
- int ret;
-
- if (!tb_switch_is_usb4(sw))
+ /*
+ * No need to disable TMU on devices that don't support CLx since
+ * on these devices, e.g. Alpine Ridge and earlier, the bi-directional
+ * HiFi TMU mode is enabled by default and we don't change it.
+ */
+ if (!tb_switch_is_clx_supported(sw))
 return 0;
 /* Already disabled? */
 if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF)
 return 0;
- if (sw->tmu.unidirectional) {
+
+ if (tb_route(sw)) {
+ bool unidirectional = sw->tmu.unidirectional;
 struct tb_switch *parent = tb_switch_parent(sw);
- struct tb_port *up, *down;
+ struct tb_port *down, *up;
+ int ret;
- up = tb_upstream_port(sw);
 down = tb_port_at(tb_route(sw), parent);
-
- /* The switch may be unplugged so ignore any errors */
- tb_port_tmu_unidirectional_disable(up);
- ret = tb_port_tmu_unidirectional_disable(down);
+ up = tb_upstream_port(sw);
+ /*
+ * In uni-directional time sync the TMU handshake is initiated
+ * by the upstream router; in bi-directional time sync it is
+ * initiated by the downstream router. We set the downstream
+ * router's rate to off in both cases although it is only
+ * needed for the bi-directional mode. We avoid changing the
+ * upstream router's mode since it might have another
+ * downstream router plugged in that is set to uni-directional
+ * mode and we don't want to change its TMU mode.
+ */
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ tb_port_tmu_time_sync_disable(up);
+ ret = tb_port_tmu_time_sync_disable(down);
 if (ret)
 return ret;
- }
- tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ if (unidirectional) {
+ /* The switch may be unplugged so ignore any errors */
+ tb_port_tmu_unidirectional_disable(up);
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ return ret;
+ }
+ } else {
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+ }
 sw->tmu.unidirectional = false;
 sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF;
@@ -329,55 +440,334 @@ int tb_switch_tmu_disable(struct tb_switch *sw)
 return 0;
 }
+static void __tb_switch_tmu_off(struct tb_switch *sw, bool unidirectional)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *down, *up;
+
+ down = tb_port_at(tb_route(sw), parent);
+ up = tb_upstream_port(sw);
+ /*
+ * In case of any failure in one of the steps when setting
+ * bi-directional or uni-directional TMU mode, fall back to the TMU
+ * configuration of the off mode. In case of additional failures in
+ * the functions below, ignore them since the caller shall already
+ * report a failure.
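+ *
+ * The teardown below is essentially the enable sequence run in
+ * reverse: stop time sync on both ports first, then drop the rate
+ * (on the parent for uni-directional mode, on this router
+ * otherwise), restore the mode parameters and finally clear the
+ * uni-directional flags on both ports.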
+ */
+ tb_port_tmu_time_sync_disable(down);
+ tb_port_tmu_time_sync_disable(up);
+ if (unidirectional)
+ tb_switch_tmu_rate_write(parent, TB_SWITCH_TMU_RATE_OFF);
+ else
+ tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF);
+
+ tb_switch_set_tmu_mode_params(sw, sw->tmu.rate);
+ tb_port_tmu_unidirectional_disable(down);
+ tb_port_tmu_unidirectional_disable(up);
+}
+
+/*
+ * This function is called when the previous TMU mode was
+ * TB_SWITCH_TMU_RATE_OFF.
+ */
+static int __tb_switch_tmu_enable_bidirectional(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+ int ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+
+ ret = tb_port_tmu_unidirectional_disable(up);
+ if (ret)
+ return ret;
+
+ ret = tb_port_tmu_unidirectional_disable(down);
+ if (ret)
+ goto out;
+
+ ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(up);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(down);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ __tb_switch_tmu_off(sw, false);
+ return ret;
+}
+
+static int tb_switch_tmu_objection_mask(struct tb_switch *sw)
+{
+ u32 val;
+ int ret;
+
+ ret = tb_sw_read(sw, &val, TB_CFG_SWITCH,
+ sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
+ if (ret)
+ return ret;
+
+ val &= ~TB_TIME_VSEC_3_CS_9_TMU_OBJ_MASK;
+
+ return tb_sw_write(sw, &val, TB_CFG_SWITCH,
+ sw->cap_vsec_tmu + TB_TIME_VSEC_3_CS_9, 1);
+}
+
+static int tb_switch_tmu_unidirectional_enable(struct tb_switch *sw)
+{
+ struct tb_port *up = tb_upstream_port(sw);
+
+ return tb_port_tmu_write(up, TMU_ADP_CS_6,
+ TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK,
+ TMU_ADP_CS_6_DISABLE_TMU_OBJ_MASK);
+}
+
+/*
+ * This function is called when the previous TMU mode was
+ * TB_SWITCH_TMU_RATE_OFF.
+ */
+static int __tb_switch_tmu_enable_unidirectional(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *up, *down;
+ int ret;
+
+ up = tb_upstream_port(sw);
+ down = tb_port_at(tb_route(sw), parent);
+ ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request);
+ if (ret)
+ return ret;
+
+ ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request);
+ if (ret)
+ return ret;
+
+ ret = tb_port_tmu_unidirectional_enable(up);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(up);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_unidirectional_enable(down);
+ if (ret)
+ goto out;
+
+ ret = tb_port_tmu_time_sync_enable(down);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ __tb_switch_tmu_off(sw, true);
+ return ret;
+}
+
+static void __tb_switch_tmu_change_mode_prev(struct tb_switch *sw)
+{
+ struct tb_switch *parent = tb_switch_parent(sw);
+ struct tb_port *down, *up;
+
+ down = tb_port_at(tb_route(sw), parent);
+ up = tb_upstream_port(sw);
+ /*
+ * In case of any failure in one of the steps when changing the
+ * mode, fall back to the TMU configuration of the previous mode.
+ * In case of additional failures in the functions below,
+ * ignore them since the caller shall already report a failure.
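+ *
+ * Note that, as in __tb_switch_tmu_enable_unidirectional(), the
+ * rate is restored on the parent router when the requested mode
+ * was uni-directional and on this router itself otherwise.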
+ */ + tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional); + if (sw->tmu.unidirectional_request) + tb_switch_tmu_rate_write(parent, sw->tmu.rate); + else + tb_switch_tmu_rate_write(sw, sw->tmu.rate); + + tb_switch_set_tmu_mode_params(sw, sw->tmu.rate); + tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional); +} + +static int __tb_switch_tmu_change_mode(struct tb_switch *sw) +{ + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *up, *down; + int ret; + + up = tb_upstream_port(sw); + down = tb_port_at(tb_route(sw), parent); + ret = tb_port_tmu_set_unidirectional(down, sw->tmu.unidirectional_request); + if (ret) + goto out; + + if (sw->tmu.unidirectional_request) + ret = tb_switch_tmu_rate_write(parent, sw->tmu.rate_request); + else + ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request); + if (ret) + return ret; + + ret = tb_switch_set_tmu_mode_params(sw, sw->tmu.rate_request); + if (ret) + return ret; + + ret = tb_port_tmu_set_unidirectional(up, sw->tmu.unidirectional_request); + if (ret) + goto out; + + ret = tb_port_tmu_time_sync_enable(down); + if (ret) + goto out; + + ret = tb_port_tmu_time_sync_enable(up); + if (ret) + goto out; + + return 0; + +out: + __tb_switch_tmu_change_mode_prev(sw); + return ret; +} + /** - * tb_switch_tmu_enable() - Enable TMU on a switch - * @sw: Switch whose TMU to enable + * tb_switch_tmu_enable() - Enable TMU on a router + * @sw: Router whose TMU to enable * - * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode - * all tunneling should work. + * Enables TMU of a router to be in uni-directional Normal/HiFi + * or bi-directional HiFi mode. Calling tb_switch_tmu_configure() is required + * before calling this function, to select the mode Normal/HiFi and + * directionality (uni-directional/bi-directional). + * In HiFi mode all tunneling should work. In Normal mode, DP tunneling can't + * work. Uni-directional mode is required for CLx (Link Low-Power) to work. */ int tb_switch_tmu_enable(struct tb_switch *sw) { + bool unidirectional = sw->tmu.unidirectional_request; int ret; - if (!tb_switch_is_usb4(sw)) - return 0; + if (unidirectional && !sw->tmu.has_ucap) + return -EOPNOTSUPP; - if (tb_switch_tmu_is_enabled(sw)) + /* + * No need to enable TMU on devices that don't support CLx since on + * these devices e.g. Alpine Ridge and earlier, the TMU mode HiFi + * bi-directional is enabled by default. + */ + if (!tb_switch_is_clx_supported(sw)) return 0; - ret = tb_switch_tmu_set_time_disruption(sw, true); - if (ret) - return ret; - - /* Change mode to bi-directional */ - if (tb_route(sw) && sw->tmu.unidirectional) { - struct tb_switch *parent = tb_switch_parent(sw); - struct tb_port *up, *down; + if (tb_switch_tmu_is_enabled(sw, sw->tmu.unidirectional_request)) + return 0; - up = tb_upstream_port(sw); - down = tb_port_at(tb_route(sw), parent); + if (tb_switch_is_titan_ridge(sw) && unidirectional) { + /* + * Titan Ridge supports CL0s and CL1 only. CL0s and CL1 are + * enabled and supported together. 
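+	 *
+	 * (Illustrative note, not in the original patch: a caller that
+	 * wants CLx therefore requests uni-directional mode first, e.g.
+	 * tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_NORMAL, true)
+	 * followed by tb_switch_tmu_enable(sw); bi-directional requests
+	 * skip this branch entirely.)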
+	 */
+	if (!tb_switch_is_clx_enabled(sw, TB_CL1))
+		return -EOPNOTSUPP;

-	ret = tb_port_tmu_unidirectional_disable(down);
+	ret = tb_switch_tmu_objection_mask(sw);
	if (ret)
		return ret;

-	ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+	ret = tb_switch_tmu_unidirectional_enable(sw);
	if (ret)
		return ret;
+	}

-	ret = tb_port_tmu_unidirectional_disable(up);
-	if (ret)
-		return ret;
+	ret = tb_switch_tmu_set_time_disruption(sw, true);
+	if (ret)
+		return ret;
+
+	if (tb_route(sw)) {
+		/*
+		 * The used mode changes are from OFF to
+		 * HiFi-Uni/HiFi-BiDir/Normal-Uni or from Normal-Uni to
+		 * HiFi-Uni.
+		 */
+		if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) {
+			if (unidirectional)
+				ret = __tb_switch_tmu_enable_unidirectional(sw);
+			else
+				ret = __tb_switch_tmu_enable_bidirectional(sw);
+			if (ret)
+				return ret;
+		} else if (sw->tmu.rate == TB_SWITCH_TMU_RATE_NORMAL) {
+			ret = __tb_switch_tmu_change_mode(sw);
+			if (ret)
+				return ret;
+		}
+		sw->tmu.unidirectional = unidirectional;
	} else {
-		ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI);
+		/*
+		 * Host router port configurations are written as part of
+		 * the configurations for the downstream port of the parent
+		 * of the child node - see above.
+		 * Here only the host router's rate configuration is written.
+		 */
+		ret = tb_switch_tmu_rate_write(sw, sw->tmu.rate_request);
		if (ret)
			return ret;
	}

-	sw->tmu.unidirectional = false;
-	sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI;
+	sw->tmu.rate = sw->tmu.rate_request;

-	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));
+	tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw));

	return tb_switch_tmu_set_time_disruption(sw, false);
}
+
+/**
+ * tb_switch_tmu_configure() - Configure the TMU rate and directionality
+ * @sw: Router whose mode to change
+ * @rate: Rate to configure Off/Normal/HiFi
+ * @unidirectional: If uni-directional (bi-directional otherwise)
+ *
+ * Selects the rate of the TMU and directionality (uni-directional or
+ * bi-directional). Must be called before tb_switch_tmu_enable().
+ */
+void tb_switch_tmu_configure(struct tb_switch *sw,
+			     enum tb_switch_tmu_rate rate, bool unidirectional)
+{
+	sw->tmu.unidirectional_request = unidirectional;
+	sw->tmu.rate_request = rate;
+}
+
+static int tb_switch_tmu_config_enable(struct device *dev, void *rate)
+{
+	if (tb_is_switch(dev)) {
+		struct tb_switch *sw = tb_to_switch(dev);
+
+		tb_switch_tmu_configure(sw, *(enum tb_switch_tmu_rate *)rate,
+					tb_switch_is_clx_enabled(sw, TB_CL1));
+		if (tb_switch_tmu_enable(sw))
+			tb_sw_dbg(sw, "failed to switch TMU mode for 1st depth router\n");
+	}
+
+	return 0;
+}
+
+/**
+ * tb_switch_enable_tmu_1st_child() - Configure and enable TMU for 1st depth children
+ * @sw: The router whose children's TMU to configure and enable
+ * @rate: Rate of the TMU to configure the router's children to
+ *
+ * Configures and enables the TMU mode of 1st depth children of the specified
+ * router to the specified rate.
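+ *
+ * For a single router the same two-step sequence documented above is
+ * (illustrative sketch only):
+ *
+ *	tb_switch_tmu_configure(sw, TB_SWITCH_TMU_RATE_HIFI, false);
+ *	ret = tb_switch_tmu_enable(sw);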
+ */ +void tb_switch_enable_tmu_1st_child(struct tb_switch *sw, + enum tb_switch_tmu_rate rate) +{ + device_for_each_child(&sw->dev, &rate, + tb_switch_tmu_config_enable); +} diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index dbe90bcf4ad4..2c3cf7fc3357 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -34,8 +34,15 @@ #define TB_DP_AUX_PATH_OUT 1 #define TB_DP_AUX_PATH_IN 2 -#define TB_DMA_PATH_OUT 0 -#define TB_DMA_PATH_IN 1 +/* Minimum number of credits needed for PCIe path */ +#define TB_MIN_PCIE_CREDITS 6U +/* + * Number of credits we try to allocate for each DMA path if not limited + * by the host router baMaxHI. + */ +#define TB_DMA_CREDITS 14U +/* Minimum number of credits for DMA path */ +#define TB_MIN_DMA_CREDITS 1U static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; @@ -60,6 +67,58 @@ static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB3" }; #define tb_tunnel_dbg(tunnel, fmt, arg...) \ __TB_TUNNEL_PRINT(tb_dbg, tunnel, fmt, ##arg) +static inline unsigned int tb_usable_credits(const struct tb_port *port) +{ + return port->total_credits - port->ctl_credits; +} + +/** + * tb_available_credits() - Available credits for PCIe and DMA + * @port: Lane adapter to check + * @max_dp_streams: If non-%NULL stores maximum number of simultaneous DP + * streams possible through this lane adapter + */ +static unsigned int tb_available_credits(const struct tb_port *port, + size_t *max_dp_streams) +{ + const struct tb_switch *sw = port->sw; + int credits, usb3, pcie, spare; + size_t ndp; + + usb3 = tb_acpi_may_tunnel_usb3() ? sw->max_usb3_credits : 0; + pcie = tb_acpi_may_tunnel_pcie() ? sw->max_pcie_credits : 0; + + if (tb_acpi_is_xdomain_allowed()) { + spare = min_not_zero(sw->max_dma_credits, TB_DMA_CREDITS); + /* Add some credits for potential second DMA tunnel */ + spare += TB_MIN_DMA_CREDITS; + } else { + spare = 0; + } + + credits = tb_usable_credits(port); + if (tb_acpi_may_tunnel_dp()) { + /* + * Maximum number of DP streams possible through the + * lane adapter. + */ + if (sw->min_dp_aux_credits + sw->min_dp_main_credits) + ndp = (credits - (usb3 + pcie + spare)) / + (sw->min_dp_aux_credits + sw->min_dp_main_credits); + else + ndp = 0; + } else { + ndp = 0; + } + credits -= ndp * (sw->min_dp_aux_credits + sw->min_dp_main_credits); + credits -= usb3; + + if (max_dp_streams) + *max_dp_streams = ndp; + + return credits > 0 ? credits : 0; +} + static struct tb_tunnel *tb_tunnel_alloc(struct tb *tb, size_t npaths, enum tb_tunnel_type type) { @@ -97,24 +156,37 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate) return 0; } -static int tb_initial_credits(const struct tb_switch *sw) +static int tb_pci_init_credits(struct tb_path_hop *hop) { - /* If the path is complete sw is not NULL */ - if (sw) { - /* More credits for faster link */ - switch (sw->link_speed * sw->link_width) { - case 40: - return 32; - case 20: - return 24; - } + struct tb_port *port = hop->in_port; + struct tb_switch *sw = port->sw; + unsigned int credits; + + if (tb_port_use_credit_allocation(port)) { + unsigned int available; + + available = tb_available_credits(port, NULL); + credits = min(sw->max_pcie_credits, available); + + if (credits < TB_MIN_PCIE_CREDITS) + return -ENOSPC; + + credits = max(TB_MIN_PCIE_CREDITS, credits); + } else { + if (tb_port_is_null(port)) + credits = port->bonded ? 
32 : 16; + else + credits = 7; } - return 16; + hop->initial_credits = credits; + return 0; } -static void tb_pci_init_path(struct tb_path *path) +static int tb_pci_init_path(struct tb_path *path) { + struct tb_path_hop *hop; + path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_ALL; @@ -122,22 +194,30 @@ static void tb_pci_init_path(struct tb_path *path) path->priority = 3; path->weight = 1; path->drop_packages = 0; - path->nfc_credits = 0; - path->hops[0].initial_credits = 7; - path->hops[1].initial_credits = - tb_initial_credits(path->hops[1].in_port->sw); + + tb_path_for_each_hop(path, hop) { + int ret; + + ret = tb_pci_init_credits(hop); + if (ret) + return ret; + } + + return 0; } /** * tb_tunnel_discover_pci() - Discover existing PCIe tunnels * @tb: Pointer to the domain structure * @down: PCIe downstream adapter + * @alloc_hopid: Allocate HopIDs from visited ports * * If @down adapter is active, follows the tunnel to the PCIe upstream * adapter and back. Returns the discovered tunnel or %NULL if there was * no tunnel. */ -struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down) +struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, + bool alloc_hopid) { struct tb_tunnel *tunnel; struct tb_path *path; @@ -158,21 +238,23 @@ struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down) * case. */ path = tb_path_discover(down, TB_PCI_HOPID, NULL, -1, - &tunnel->dst_port, "PCIe Up"); + &tunnel->dst_port, "PCIe Up", alloc_hopid); if (!path) { /* Just disable the downstream port */ tb_pci_port_enable(down, false); goto err_free; } tunnel->paths[TB_PCI_PATH_UP] = path; - tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP]); + if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_UP])) + goto err_free; path = tb_path_discover(tunnel->dst_port, -1, down, TB_PCI_HOPID, NULL, - "PCIe Down"); + "PCIe Down", alloc_hopid); if (!path) goto err_deactivate; tunnel->paths[TB_PCI_PATH_DOWN] = path; - tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN]); + if (tb_pci_init_path(tunnel->paths[TB_PCI_PATH_DOWN])) + goto err_deactivate; /* Validate that the tunnel is complete */ if (!tb_port_is_pcie_up(tunnel->dst_port)) { @@ -230,23 +312,25 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, path = tb_path_alloc(tb, down, TB_PCI_HOPID, up, TB_PCI_HOPID, 0, "PCIe Down"); - if (!path) { - tb_tunnel_free(tunnel); - return NULL; - } - tb_pci_init_path(path); + if (!path) + goto err_free; tunnel->paths[TB_PCI_PATH_DOWN] = path; + if (tb_pci_init_path(path)) + goto err_free; path = tb_path_alloc(tb, up, TB_PCI_HOPID, down, TB_PCI_HOPID, 0, "PCIe Up"); - if (!path) { - tb_tunnel_free(tunnel); - return NULL; - } - tb_pci_init_path(path); + if (!path) + goto err_free; tunnel->paths[TB_PCI_PATH_UP] = path; + if (tb_pci_init_path(path)) + goto err_free; return tunnel; + +err_free: + tb_tunnel_free(tunnel); + return NULL; } static bool tb_dp_is_usb4(const struct tb_switch *sw) @@ -314,7 +398,7 @@ static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate) switch (rate) { default: WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate); - /* Fallthrough */ + fallthrough; case 1620: val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT; break; @@ -354,7 +438,7 @@ static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes) default: WARN(1, "invalid number of lanes %u passed, defaulting to 1\n", lanes); - /* Fallthrough */ + fallthrough; case 1: val |= 
DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT; break; @@ -422,7 +506,7 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) u32 out_dp_cap, out_rate, out_lanes, in_dp_cap, in_rate, in_lanes, bw; struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; - int ret; + int ret, max_bw; /* * Copy DP_LOCAL_CAP register to DP_REMOTE_CAP register for @@ -471,10 +555,15 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", out_rate, out_lanes, bw); - if (tunnel->max_bw && bw > tunnel->max_bw) { + if (in->sw->config.depth < out->sw->config.depth) + max_bw = tunnel->max_down; + else + max_bw = tunnel->max_up; + + if (max_bw && bw > max_bw) { u32 new_rate, new_lanes, new_bw; - ret = tb_dp_reduce_bandwidth(tunnel->max_bw, in_rate, in_lanes, + ret = tb_dp_reduce_bandwidth(max_bw, in_rate, in_lanes, out_rate, out_lanes, &new_rate, &new_lanes); if (ret) { @@ -494,6 +583,16 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes); } + /* + * Titan Ridge does not disable AUX timers when it gets + * SET_CONFIG with SET_LTTPR_MODE set. This causes problems with + * DP tunneling. + */ + if (tb_route(out->sw) && tb_switch_is_titan_ridge(out->sw)) { + out_dp_cap |= DP_COMMON_CAP_LTTPR_NS; + tb_port_dbg(out, "disabling LTTPR\n"); + } + return tb_port_write(in, &out_dp_cap, TB_CFG_PORT, in->cap_adap + DP_REMOTE_CAP, 1); } @@ -535,7 +634,8 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) return 0; } -static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel) +static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, + int *consumed_down) { struct tb_port *in = tunnel->src_port; const struct tb_switch *sw = in->sw; @@ -543,7 +643,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel) int ret; if (tb_dp_is_usb4(sw)) { - int timeout = 10; + int timeout = 20; /* * Wait for DPRX done. 
Normally it should be already set @@ -579,15 +679,36 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel) lanes = tb_dp_cap_get_lanes(val); } else { /* No bandwidth management for legacy devices */ + *consumed_up = 0; + *consumed_down = 0; return 0; } - return tb_dp_bandwidth(rate, lanes); + if (in->sw->config.depth < tunnel->dst_port->sw->config.depth) { + *consumed_up = 0; + *consumed_down = tb_dp_bandwidth(rate, lanes); + } else { + *consumed_up = tb_dp_bandwidth(rate, lanes); + *consumed_down = 0; + } + + return 0; +} + +static void tb_dp_init_aux_credits(struct tb_path_hop *hop) +{ + struct tb_port *port = hop->in_port; + struct tb_switch *sw = port->sw; + + if (tb_port_use_credit_allocation(port)) + hop->initial_credits = sw->min_dp_aux_credits; + else + hop->initial_credits = 1; } static void tb_dp_init_aux_path(struct tb_path *path) { - int i; + struct tb_path_hop *hop; path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; path->egress_shared_buffer = TB_PATH_NONE; @@ -596,13 +717,42 @@ static void tb_dp_init_aux_path(struct tb_path *path) path->priority = 2; path->weight = 1; - for (i = 0; i < path->path_length; i++) - path->hops[i].initial_credits = 1; + tb_path_for_each_hop(path, hop) + tb_dp_init_aux_credits(hop); } -static void tb_dp_init_video_path(struct tb_path *path, bool discover) +static int tb_dp_init_video_credits(struct tb_path_hop *hop) { - u32 nfc_credits = path->hops[0].in_port->config.nfc_credits; + struct tb_port *port = hop->in_port; + struct tb_switch *sw = port->sw; + + if (tb_port_use_credit_allocation(port)) { + unsigned int nfc_credits; + size_t max_dp_streams; + + tb_available_credits(port, &max_dp_streams); + /* + * Read the number of currently allocated NFC credits + * from the lane adapter. Since we only use them for DP + * tunneling we can use that to figure out how many DP + * tunnels already go through the lane adapter. + */ + nfc_credits = port->config.nfc_credits & + ADP_CS_4_NFC_BUFFERS_MASK; + if (nfc_credits / sw->min_dp_main_credits > max_dp_streams) + return -ENOSPC; + + hop->nfc_credits = sw->min_dp_main_credits; + } else { + hop->nfc_credits = min(port->total_credits - 2, 12U); + } + + return 0; +} + +static int tb_dp_init_video_path(struct tb_path *path) +{ + struct tb_path_hop *hop; path->egress_fc_enable = TB_PATH_NONE; path->egress_shared_buffer = TB_PATH_NONE; @@ -611,22 +761,22 @@ static void tb_dp_init_video_path(struct tb_path *path, bool discover) path->priority = 1; path->weight = 1; - if (discover) { - path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; - } else { - u32 max_credits; + tb_path_for_each_hop(path, hop) { + int ret; - max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >> - ADP_CS_4_TOTAL_BUFFERS_SHIFT; - /* Leave some credits for AUX path */ - path->nfc_credits = min(max_credits - 2, 12U); + ret = tb_dp_init_video_credits(hop); + if (ret) + return ret; } + + return 0; } /** * tb_tunnel_discover_dp() - Discover existing Display Port tunnels * @tb: Pointer to the domain structure * @in: DP in adapter + * @alloc_hopid: Allocate HopIDs from visited ports * * If @in adapter is active, follows the tunnel to the DP out adapter * and back. Returns the discovered tunnel or %NULL if there was no @@ -634,7 +784,8 @@ static void tb_dp_init_video_path(struct tb_path *path, bool discover) * * Return: DP tunnel or %NULL if no tunnel found. 
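To make the credit accounting in tb_usable_credits(), tb_available_credits() and tb_dp_init_video_credits() above concrete, here is a worked example with made-up router limits (standalone sketch, not part of the patch; field names mirror the ones used above):

static unsigned int example_credit_budget(void)
{
	/* Hypothetical numbers, not from any real router */
	unsigned int total = 60, ctl = 2;		/* usable = 58 */
	unsigned int usb3 = 32, pcie = 6, spare = 15;	/* 14 + 1 for DMA */
	unsigned int dp = 19;				/* aux + main credits */
	unsigned int credits = total - ctl;
	unsigned int ndp = (credits - (usb3 + pcie + spare)) / dp;

	/* (58 - 53) / 19 = 0 simultaneous DP streams possible */
	credits -= ndp * dp;
	/* 58 - 0 - 32 = 26 credits left for PCIe and DMA paths */
	credits -= usb3;
	return credits;
}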
*/ -struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in) +struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, + bool alloc_hopid) { struct tb_tunnel *tunnel; struct tb_port *port; @@ -653,23 +804,25 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in) tunnel->src_port = in; path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1, - &tunnel->dst_port, "Video"); + &tunnel->dst_port, "Video", alloc_hopid); if (!path) { /* Just disable the DP IN port */ tb_dp_port_enable(in, false); goto err_free; } tunnel->paths[TB_DP_VIDEO_PATH_OUT] = path; - tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT], true); + if (tb_dp_init_video_path(tunnel->paths[TB_DP_VIDEO_PATH_OUT])) + goto err_free; - path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX"); + path = tb_path_discover(in, TB_DP_AUX_TX_HOPID, NULL, -1, NULL, "AUX TX", + alloc_hopid); if (!path) goto err_deactivate; tunnel->paths[TB_DP_AUX_PATH_OUT] = path; tb_dp_init_aux_path(tunnel->paths[TB_DP_AUX_PATH_OUT]); path = tb_path_discover(tunnel->dst_port, -1, in, TB_DP_AUX_RX_HOPID, - &port, "AUX RX"); + &port, "AUX RX", alloc_hopid); if (!path) goto err_deactivate; tunnel->paths[TB_DP_AUX_PATH_IN] = path; @@ -708,7 +861,11 @@ err_free: * @tb: Pointer to the domain structure * @in: DP in adapter port * @out: DP out adapter port - * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited) + * @link_nr: Preferred lane adapter when the link is not bonded + * @max_up: Maximum available upstream bandwidth for the DP tunnel (%0 + * if not limited) + * @max_down: Maximum available downstream bandwidth for the DP tunnel + * (%0 if not limited) * * Allocates a tunnel between @in and @out that is capable of tunneling * Display Port traffic. @@ -716,7 +873,8 @@ err_free: * Return: Returns a tb_tunnel on success or NULL on failure. 
*/ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, - struct tb_port *out, int max_bw) + struct tb_port *out, int link_nr, + int max_up, int max_down) { struct tb_tunnel *tunnel; struct tb_path **paths; @@ -734,26 +892,27 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; tunnel->src_port = in; tunnel->dst_port = out; - tunnel->max_bw = max_bw; + tunnel->max_up = max_up; + tunnel->max_down = max_down; paths = tunnel->paths; path = tb_path_alloc(tb, in, TB_DP_VIDEO_HOPID, out, TB_DP_VIDEO_HOPID, - 1, "Video"); + link_nr, "Video"); if (!path) goto err_free; - tb_dp_init_video_path(path, false); + tb_dp_init_video_path(path); paths[TB_DP_VIDEO_PATH_OUT] = path; path = tb_path_alloc(tb, in, TB_DP_AUX_TX_HOPID, out, - TB_DP_AUX_TX_HOPID, 1, "AUX TX"); + TB_DP_AUX_TX_HOPID, link_nr, "AUX TX"); if (!path) goto err_free; tb_dp_init_aux_path(path); paths[TB_DP_AUX_PATH_OUT] = path; path = tb_path_alloc(tb, out, TB_DP_AUX_RX_HOPID, in, - TB_DP_AUX_RX_HOPID, 1, "AUX RX"); + TB_DP_AUX_RX_HOPID, link_nr, "AUX RX"); if (!path) goto err_free; tb_dp_init_aux_path(path); @@ -766,39 +925,139 @@ err_free: return NULL; } -static u32 tb_dma_credits(struct tb_port *nhi) +static unsigned int tb_dma_available_credits(const struct tb_port *port) { - u32 max_credits; + const struct tb_switch *sw = port->sw; + int credits; + + credits = tb_available_credits(port, NULL); + if (tb_acpi_may_tunnel_pcie()) + credits -= sw->max_pcie_credits; + credits -= port->dma_credits; - max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >> - ADP_CS_4_TOTAL_BUFFERS_SHIFT; - return min(max_credits, 13U); + return credits > 0 ? credits : 0; } -static int tb_dma_activate(struct tb_tunnel *tunnel, bool active) +static int tb_dma_reserve_credits(struct tb_path_hop *hop, unsigned int credits) { - struct tb_port *nhi = tunnel->src_port; - u32 credits; + struct tb_port *port = hop->in_port; + + if (tb_port_use_credit_allocation(port)) { + unsigned int available = tb_dma_available_credits(port); + + /* + * Need to have at least TB_MIN_DMA_CREDITS, otherwise + * DMA path cannot be established. + */ + if (available < TB_MIN_DMA_CREDITS) + return -ENOSPC; + + while (credits > available) + credits--; + + tb_port_dbg(port, "reserving %u credits for DMA path\n", + credits); - credits = active ? tb_dma_credits(nhi) : 0; - return tb_port_set_initial_credits(nhi, credits); + port->dma_credits += credits; + } else { + if (tb_port_is_null(port)) + credits = port->bonded ? 14 : 6; + else + credits = min(port->total_credits, credits); + } + + hop->initial_credits = credits; + return 0; } -static void tb_dma_init_path(struct tb_path *path, unsigned int isb, - unsigned int efc, u32 credits) +/* Path from lane adapter to NHI */ +static int tb_dma_init_rx_path(struct tb_path *path, unsigned int credits) { - int i; + struct tb_path_hop *hop; + unsigned int i, tmp; - path->egress_fc_enable = efc; + path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; path->ingress_fc_enable = TB_PATH_ALL; path->egress_shared_buffer = TB_PATH_NONE; - path->ingress_shared_buffer = isb; + path->ingress_shared_buffer = TB_PATH_NONE; path->priority = 5; path->weight = 1; path->clear_fc = true; - for (i = 0; i < path->path_length; i++) - path->hops[i].initial_credits = credits; + /* + * First lane adapter is the one connected to the remote host. 
+ * We don't tunnel other traffic over this link so can use all + * the credits (except the ones reserved for control traffic). + */ + hop = &path->hops[0]; + tmp = min(tb_usable_credits(hop->in_port), credits); + hop->initial_credits = tmp; + hop->in_port->dma_credits += tmp; + + for (i = 1; i < path->path_length; i++) { + int ret; + + ret = tb_dma_reserve_credits(&path->hops[i], credits); + if (ret) + return ret; + } + + return 0; +} + +/* Path from NHI to lane adapter */ +static int tb_dma_init_tx_path(struct tb_path *path, unsigned int credits) +{ + struct tb_path_hop *hop; + + path->egress_fc_enable = TB_PATH_ALL; + path->ingress_fc_enable = TB_PATH_ALL; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_shared_buffer = TB_PATH_NONE; + path->priority = 5; + path->weight = 1; + path->clear_fc = true; + + tb_path_for_each_hop(path, hop) { + int ret; + + ret = tb_dma_reserve_credits(hop, credits); + if (ret) + return ret; + } + + return 0; +} + +static void tb_dma_release_credits(struct tb_path_hop *hop) +{ + struct tb_port *port = hop->in_port; + + if (tb_port_use_credit_allocation(port)) { + port->dma_credits -= hop->initial_credits; + + tb_port_dbg(port, "released %u DMA path credits\n", + hop->initial_credits); + } +} + +static void tb_dma_deinit_path(struct tb_path *path) +{ + struct tb_path_hop *hop; + + tb_path_for_each_hop(path, hop) + tb_dma_release_credits(hop); +} + +static void tb_dma_deinit(struct tb_tunnel *tunnel) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + if (!tunnel->paths[i]) + continue; + tb_dma_deinit_path(tunnel->paths[i]); + } } /** @@ -806,52 +1065,159 @@ static void tb_dma_init_path(struct tb_path *path, unsigned int isb, * @tb: Pointer to the domain structure * @nhi: Host controller port * @dst: Destination null port which the other domain is connected to - * @transmit_ring: NHI ring number used to send packets towards the - * other domain * @transmit_path: HopID used for transmitting packets + * @transmit_ring: NHI ring number used to send packets towards the + * other domain. Set to %-1 if TX path is not needed. + * @receive_path: HopID used for receiving packets * @receive_ring: NHI ring number used to receive packets from the - * other domain - * @reveive_path: HopID used for receiving packets + * other domain. Set to %-1 if RX path is not needed. * * Return: Returns a tb_tunnel on success or NULL on failure. 
 */
struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi,
-				      struct tb_port *dst, int transmit_ring,
-				      int transmit_path, int receive_ring,
-				      int receive_path)
+				      struct tb_port *dst, int transmit_path,
+				      int transmit_ring, int receive_path,
+				      int receive_ring)
{
	struct tb_tunnel *tunnel;
+	size_t npaths = 0, i = 0;
	struct tb_path *path;
-	u32 credits;
+	int credits;
+
+	if (receive_ring > 0)
+		npaths++;
+	if (transmit_ring > 0)
+		npaths++;

-	tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_DMA);
+	if (WARN_ON(!npaths))
+		return NULL;
+
+	tunnel = tb_tunnel_alloc(tb, npaths, TB_TUNNEL_DMA);
	if (!tunnel)
		return NULL;

-	tunnel->activate = tb_dma_activate;
	tunnel->src_port = nhi;
	tunnel->dst_port = dst;
+	tunnel->deinit = tb_dma_deinit;
+
+	credits = min_not_zero(TB_DMA_CREDITS, nhi->sw->max_dma_credits);
+
+	if (receive_ring > 0) {
+		path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0,
+				     "DMA RX");
+		if (!path)
+			goto err_free;
+		tunnel->paths[i++] = path;
+		if (tb_dma_init_rx_path(path, credits)) {
+			tb_tunnel_dbg(tunnel, "not enough buffers for RX path\n");
+			goto err_free;
+		}
+	}

-	credits = tb_dma_credits(nhi);
+	if (transmit_ring > 0) {
+		path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0,
+				     "DMA TX");
+		if (!path)
+			goto err_free;
+		tunnel->paths[i++] = path;
+		if (tb_dma_init_tx_path(path, credits)) {
+			tb_tunnel_dbg(tunnel, "not enough buffers for TX path\n");
+			goto err_free;
+		}
+	}

-	path = tb_path_alloc(tb, dst, receive_path, nhi, receive_ring, 0, "DMA RX");
-	if (!path) {
-		tb_tunnel_free(tunnel);
-		return NULL;
+	return tunnel;
+
+err_free:
+	tb_tunnel_free(tunnel);
+	return NULL;
+}
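Since either direction may now be omitted, a receive-only tunnel is created by passing %-1 for the transmit pair, per the kernel-doc above. A caller sketch (ring and HopID numbers are made up; tb_tunnel_match_dma() is introduced just below):

	/* RX-only DMA tunnel: no TX path is allocated */
	tunnel = tb_tunnel_alloc_dma(tb, nhi, dst, -1, -1, 8, 1);
	if (!tunnel)
		return -ENOMEM;

	/* Later, pick the same tunnel out again; %-1 acts as a wildcard */
	if (tb_tunnel_match_dma(tunnel, -1, -1, 8, 1))
		tb_tunnel_deactivate(tunnel);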
+
+/**
+ * tb_tunnel_match_dma() - Match DMA tunnel
+ * @tunnel: Tunnel to match
+ * @transmit_path: HopID used for transmitting packets. Pass %-1 to ignore.
+ * @transmit_ring: NHI ring number used to send packets towards the
+ *		   other domain. Pass %-1 to ignore.
+ * @receive_path: HopID used for receiving packets. Pass %-1 to ignore.
+ * @receive_ring: NHI ring number used to receive packets from the
+ *		  other domain. Pass %-1 to ignore.
+ *
+ * This function can be used to match a specific DMA tunnel, if there are
+ * multiple DMA tunnels going through the same XDomain connection.
+ * Returns true if there is a match and false otherwise.
+ */
+bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path,
+			 int transmit_ring, int receive_path, int receive_ring)
+{
+	const struct tb_path *tx_path = NULL, *rx_path = NULL;
+	int i;
+
+	if (!receive_ring || !transmit_ring)
+		return false;
+
+	for (i = 0; i < tunnel->npaths; i++) {
+		const struct tb_path *path = tunnel->paths[i];
+
+		if (!path)
+			continue;
+
+		if (tb_port_is_nhi(path->hops[0].in_port))
+			tx_path = path;
+		else if (tb_port_is_nhi(path->hops[path->path_length - 1].out_port))
+			rx_path = path;
+	}

-	tb_dma_init_path(path, TB_PATH_NONE, TB_PATH_SOURCE | TB_PATH_INTERNAL,
-			 credits);
-	tunnel->paths[TB_DMA_PATH_IN] = path;

-	path = tb_path_alloc(tb, nhi, transmit_ring, dst, transmit_path, 0, "DMA TX");
-	if (!path) {
-		tb_tunnel_free(tunnel);
-		return NULL;
+	if (transmit_ring > 0 || transmit_path > 0) {
+		if (!tx_path)
+			return false;
+		if (transmit_ring > 0 &&
+		    (tx_path->hops[0].in_hop_index != transmit_ring))
+			return false;
+		if (transmit_path > 0 &&
+		    (tx_path->hops[tx_path->path_length - 1].next_hop_index != transmit_path))
+			return false;
	}

-	tb_dma_init_path(path, TB_PATH_SOURCE, TB_PATH_ALL, credits);
-	tunnel->paths[TB_DMA_PATH_OUT] = path;
-	return tunnel;
+	if (receive_ring > 0 || receive_path > 0) {
+		if (!rx_path)
+			return false;
+		if (receive_path > 0 &&
+		    (rx_path->hops[0].in_hop_index != receive_path))
+			return false;
+		if (receive_ring > 0 &&
+		    (rx_path->hops[rx_path->path_length - 1].next_hop_index != receive_ring))
+			return false;
+	}
+
+	return true;
+}
+
+static int tb_usb3_max_link_rate(struct tb_port *up, struct tb_port *down)
+{
+	int ret, up_max_rate, down_max_rate;
+
+	ret = usb4_usb3_port_max_link_rate(up);
+	if (ret < 0)
+		return ret;
+	up_max_rate = ret;
+
+	ret = usb4_usb3_port_max_link_rate(down);
+	if (ret < 0)
+		return ret;
+	down_max_rate = ret;
+
+	return min(up_max_rate, down_max_rate);
+}
+
+static int tb_usb3_init(struct tb_tunnel *tunnel)
+{
+	tb_tunnel_dbg(tunnel, "allocating initial bandwidth %d/%d Mb/s\n",
+		      tunnel->allocated_up, tunnel->allocated_down);
+
+	return usb4_usb3_port_allocate_bandwidth(tunnel->src_port,
+						 &tunnel->allocated_up,
+						 &tunnel->allocated_down);
+}
+
 static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
@@ -868,8 +1234,118 @@ static int tb_usb3_activate(struct tb_tunnel *tunnel, bool activate)
 	return 0;
 }
 
+static int tb_usb3_consumed_bandwidth(struct tb_tunnel *tunnel,
+				      int *consumed_up, int *consumed_down)
+{
+	int pcie_enabled = tb_acpi_may_tunnel_pcie();
+
+	/*
+	 * PCIe tunneling, if enabled, affects the USB3 bandwidth so
+	 * take that into account here.
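+	 *
+	 * For example (made-up numbers): with allocated_up/down of
+	 * 900/3000 Mb/s and PCIe tunneling enabled, the values reported
+	 * below are 900 * 4 / 3 = 1200 and 3000 * 4 / 3 = 4000 Mb/s.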
+ */ + *consumed_up = tunnel->allocated_up * (3 + pcie_enabled) / 3; + *consumed_down = tunnel->allocated_down * (3 + pcie_enabled) / 3; + return 0; +} + +static int tb_usb3_release_unused_bandwidth(struct tb_tunnel *tunnel) +{ + int ret; + + ret = usb4_usb3_port_release_bandwidth(tunnel->src_port, + &tunnel->allocated_up, + &tunnel->allocated_down); + if (ret) + return ret; + + tb_tunnel_dbg(tunnel, "decreased bandwidth allocation to %d/%d Mb/s\n", + tunnel->allocated_up, tunnel->allocated_down); + return 0; +} + +static void tb_usb3_reclaim_available_bandwidth(struct tb_tunnel *tunnel, + int *available_up, + int *available_down) +{ + int ret, max_rate, allocate_up, allocate_down; + + ret = usb4_usb3_port_actual_link_rate(tunnel->src_port); + if (ret < 0) { + tb_tunnel_warn(tunnel, "failed to read actual link rate\n"); + return; + } else if (!ret) { + /* Use maximum link rate if the link valid is not set */ + ret = usb4_usb3_port_max_link_rate(tunnel->src_port); + if (ret < 0) { + tb_tunnel_warn(tunnel, "failed to read maximum link rate\n"); + return; + } + } + + /* + * 90% of the max rate can be allocated for isochronous + * transfers. + */ + max_rate = ret * 90 / 100; + + /* No need to reclaim if already at maximum */ + if (tunnel->allocated_up >= max_rate && + tunnel->allocated_down >= max_rate) + return; + + /* Don't go lower than what is already allocated */ + allocate_up = min(max_rate, *available_up); + if (allocate_up < tunnel->allocated_up) + allocate_up = tunnel->allocated_up; + + allocate_down = min(max_rate, *available_down); + if (allocate_down < tunnel->allocated_down) + allocate_down = tunnel->allocated_down; + + /* If no changes no need to do more */ + if (allocate_up == tunnel->allocated_up && + allocate_down == tunnel->allocated_down) + return; + + ret = usb4_usb3_port_allocate_bandwidth(tunnel->src_port, &allocate_up, + &allocate_down); + if (ret) { + tb_tunnel_info(tunnel, "failed to allocate bandwidth\n"); + return; + } + + tunnel->allocated_up = allocate_up; + *available_up -= tunnel->allocated_up; + + tunnel->allocated_down = allocate_down; + *available_down -= tunnel->allocated_down; + + tb_tunnel_dbg(tunnel, "increased bandwidth allocation to %d/%d Mb/s\n", + tunnel->allocated_up, tunnel->allocated_down); +} + +static void tb_usb3_init_credits(struct tb_path_hop *hop) +{ + struct tb_port *port = hop->in_port; + struct tb_switch *sw = port->sw; + unsigned int credits; + + if (tb_port_use_credit_allocation(port)) { + credits = sw->max_usb3_credits; + } else { + if (tb_port_is_null(port)) + credits = port->bonded ? 32 : 16; + else + credits = 7; + } + + hop->initial_credits = credits; +} + static void tb_usb3_init_path(struct tb_path *path) { + struct tb_path_hop *hop; + path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; path->egress_shared_buffer = TB_PATH_NONE; path->ingress_fc_enable = TB_PATH_ALL; @@ -877,22 +1353,23 @@ static void tb_usb3_init_path(struct tb_path *path) path->priority = 3; path->weight = 3; path->drop_packages = 0; - path->nfc_credits = 0; - path->hops[0].initial_credits = 7; - path->hops[1].initial_credits = - tb_initial_credits(path->hops[1].in_port->sw); + + tb_path_for_each_hop(path, hop) + tb_usb3_init_credits(hop); } /** * tb_tunnel_discover_usb3() - Discover existing USB3 tunnels * @tb: Pointer to the domain structure * @down: USB3 downstream adapter + * @alloc_hopid: Allocate HopIDs from visited ports * * If @down adapter is active, follows the tunnel to the USB3 upstream * adapter and back. 
Returns the discovered tunnel or %NULL if there was * no tunnel. */ -struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down) +struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down, + bool alloc_hopid) { struct tb_tunnel *tunnel; struct tb_path *path; @@ -913,21 +1390,21 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down) * case. */ path = tb_path_discover(down, TB_USB3_HOPID, NULL, -1, - &tunnel->dst_port, "USB3 Up"); + &tunnel->dst_port, "USB3 Down", alloc_hopid); if (!path) { /* Just disable the downstream port */ tb_usb3_port_enable(down, false); goto err_free; } - tunnel->paths[TB_USB3_PATH_UP] = path; - tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); + tunnel->paths[TB_USB3_PATH_DOWN] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB3_HOPID, NULL, - "USB3 Down"); + "USB3 Up", alloc_hopid); if (!path) goto err_deactivate; - tunnel->paths[TB_USB3_PATH_DOWN] = path; - tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_DOWN]); + tunnel->paths[TB_USB3_PATH_UP] = path; + tb_usb3_init_path(tunnel->paths[TB_USB3_PATH_UP]); /* Validate that the tunnel is complete */ if (!tb_port_is_usb3_up(tunnel->dst_port)) { @@ -947,6 +1424,29 @@ struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down) goto err_deactivate; } + if (!tb_route(down->sw)) { + int ret; + + /* + * Read the initial bandwidth allocation for the first + * hop tunnel. + */ + ret = usb4_usb3_port_allocated_bandwidth(down, + &tunnel->allocated_up, &tunnel->allocated_down); + if (ret) + goto err_deactivate; + + tb_tunnel_dbg(tunnel, "currently allocated bandwidth %d/%d Mb/s\n", + tunnel->allocated_up, tunnel->allocated_down); + + tunnel->init = tb_usb3_init; + tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; + tunnel->release_unused_bandwidth = + tb_usb3_release_unused_bandwidth; + tunnel->reclaim_available_bandwidth = + tb_usb3_reclaim_available_bandwidth; + } + tb_tunnel_dbg(tunnel, "discovered\n"); return tunnel; @@ -963,6 +1463,10 @@ err_free: * @tb: Pointer to the domain structure * @up: USB3 upstream adapter port * @down: USB3 downstream adapter port + * @max_up: Maximum available upstream bandwidth for the USB3 tunnel (%0 + * if not limited). + * @max_down: Maximum available downstream bandwidth for the USB3 tunnel + * (%0 if not limited). * * Allocate an USB3 tunnel. The ports must be of type @TB_TYPE_USB3_UP and * @TB_TYPE_USB3_DOWN. @@ -970,10 +1474,32 @@ err_free: * Return: Returns a tb_tunnel on success or %NULL on failure. */ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, - struct tb_port *down) + struct tb_port *down, int max_up, + int max_down) { struct tb_tunnel *tunnel; struct tb_path *path; + int max_rate = 0; + + /* + * Check that we have enough bandwidth available for the new + * USB3 tunnel. 
+ */ + if (max_up > 0 || max_down > 0) { + max_rate = tb_usb3_max_link_rate(down, up); + if (max_rate < 0) + return NULL; + + /* Only 90% can be allocated for USB3 isochronous transfers */ + max_rate = max_rate * 90 / 100; + tb_port_dbg(up, "required bandwidth for USB3 tunnel %d Mb/s\n", + max_rate); + + if (max_rate > max_up || max_rate > max_down) { + tb_port_warn(up, "not enough bandwidth for USB3 tunnel\n"); + return NULL; + } + } tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB3); if (!tunnel) @@ -982,6 +1508,8 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, tunnel->activate = tb_usb3_activate; tunnel->src_port = down; tunnel->dst_port = up; + tunnel->max_up = max_up; + tunnel->max_down = max_down; path = tb_path_alloc(tb, down, TB_USB3_HOPID, up, TB_USB3_HOPID, 0, "USB3 Down"); @@ -1001,6 +1529,18 @@ struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, tb_usb3_init_path(path); tunnel->paths[TB_USB3_PATH_UP] = path; + if (!tb_route(down->sw)) { + tunnel->allocated_up = max_rate; + tunnel->allocated_down = max_rate; + + tunnel->init = tb_usb3_init; + tunnel->consumed_bandwidth = tb_usb3_consumed_bandwidth; + tunnel->release_unused_bandwidth = + tb_usb3_release_unused_bandwidth; + tunnel->reclaim_available_bandwidth = + tb_usb3_reclaim_available_bandwidth; + } + return tunnel; } @@ -1017,6 +1557,9 @@ void tb_tunnel_free(struct tb_tunnel *tunnel) if (!tunnel) return; + if (tunnel->deinit) + tunnel->deinit(tunnel); + for (i = 0; i < tunnel->npaths; i++) { if (tunnel->paths[i]) tb_path_free(tunnel->paths[i]); @@ -1133,22 +1676,23 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel) } /** - * tb_tunnel_switch_on_path() - Does the tunnel go through switch + * tb_tunnel_port_on_path() - Does the tunnel go through port * @tunnel: Tunnel to check - * @sw: Switch to check + * @port: Port to check * - * Returns true if @tunnel goes through @sw (direction does not matter), + * Returns true if @tunnel goes through @port (direction does not matter), * false otherwise. */ -bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel, - const struct tb_switch *sw) +bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel, + const struct tb_port *port) { int i; for (i = 0; i < tunnel->npaths; i++) { if (!tunnel->paths[i]) continue; - if (tb_path_switch_on_path(tunnel->paths[i], sw)) + + if (tb_path_port_on_path(tunnel->paths[i], port)) return true; } @@ -1172,21 +1716,87 @@ static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel) /** * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel * @tunnel: Tunnel to check + * @consumed_up: Consumed bandwidth in Mb/s from @dst_port to @src_port. + * Can be %NULL. + * @consumed_down: Consumed bandwidth in Mb/s from @src_port to @dst_port. + * Can be %NULL. * - * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel - * is not active or does consume bandwidth. + * Stores the amount of isochronous bandwidth @tunnel consumes in + * @consumed_up and @consumed_down. In case of success returns %0, + * negative errno otherwise. 
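+ *
+ * (Illustrative use, not part of the patch: together with
+ * tb_tunnel_release_unused_bandwidth() and
+ * tb_tunnel_reclaim_available_bandwidth() added later in this hunk, a
+ * connection manager holding tunnels linked via @list can rebalance;
+ * the list head and available_* numbers here are hypothetical:
+ *
+ *	list_for_each_entry(tunnel, &tcm->tunnel_list, list) {
+ *		tb_tunnel_release_unused_bandwidth(tunnel);
+ *		if (!tb_tunnel_consumed_bandwidth(tunnel, &up, &down)) {
+ *			available_up -= up;
+ *			available_down -= down;
+ *		}
+ *	}
+ *	list_for_each_entry(tunnel, &tcm->tunnel_list, list)
+ *		tb_tunnel_reclaim_available_bandwidth(tunnel, &available_up,
+ *						      &available_down);)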
 */
-int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel)
+int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up,
+				 int *consumed_down)
{
+	int up_bw = 0, down_bw = 0;
+
	if (!tb_tunnel_is_active(tunnel))
-		return 0;
+		goto out;

	if (tunnel->consumed_bandwidth) {
-		int ret = tunnel->consumed_bandwidth(tunnel);
+		int ret;

-		tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret);
-		return ret;
+		ret = tunnel->consumed_bandwidth(tunnel, &up_bw, &down_bw);
+		if (ret)
+			return ret;
+
+		tb_tunnel_dbg(tunnel, "consumed bandwidth %d/%d Mb/s\n", up_bw,
+			      down_bw);
+	}
+
+out:
+	if (consumed_up)
+		*consumed_up = up_bw;
+	if (consumed_down)
+		*consumed_down = down_bw;
+
+	return 0;
+}
+
+/**
+ * tb_tunnel_release_unused_bandwidth() - Release unused bandwidth
+ * @tunnel: Tunnel whose unused bandwidth to release
+ *
+ * If the tunnel supports dynamic bandwidth management (USB3 tunnels at
+ * the moment) this function makes it release all the unused bandwidth.
+ *
+ * Returns %0 in case of success and negative errno otherwise.
+ */
+int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel)
+{
+	if (!tb_tunnel_is_active(tunnel))
+		return 0;
+
+	if (tunnel->release_unused_bandwidth) {
+		int ret;
+
+		ret = tunnel->release_unused_bandwidth(tunnel);
+		if (ret)
+			return ret;
	}

	return 0;
}
+
+/**
+ * tb_tunnel_reclaim_available_bandwidth() - Reclaim available bandwidth
+ * @tunnel: Tunnel reclaiming available bandwidth
+ * @available_up: Available upstream bandwidth (in Mb/s)
+ * @available_down: Available downstream bandwidth (in Mb/s)
+ *
+ * Reclaims bandwidth from @available_up and @available_down and updates
+ * the variables accordingly (e.g. decreases both according to what was
+ * reclaimed by the tunnel). If nothing was reclaimed the values are
+ * kept as is.
+ */
+void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel,
+					   int *available_up,
+					   int *available_down)
+{
+	if (!tb_tunnel_is_active(tunnel))
+		return;
+
+	if (tunnel->reclaim_available_bandwidth)
+		tunnel->reclaim_available_bandwidth(tunnel, available_up,
+						    available_down);
+}
diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h
index 3f5ba93225e7..bb4d1f1d6d0b 100644
--- a/drivers/thunderbolt/tunnel.h
+++ b/drivers/thunderbolt/tunnel.h
@@ -27,12 +27,19 @@ enum tb_tunnel_type {
 * @paths: All paths required by the tunnel
 * @npaths: Number of paths in @paths
 * @init: Optional tunnel specific initialization
+ * @deinit: Optional tunnel specific de-initialization
 * @activate: Optional tunnel specific activation/deactivation
 * @consumed_bandwidth: Return how much bandwidth the tunnel consumes
+ * @release_unused_bandwidth: Release all unused bandwidth
+ * @reclaim_available_bandwidth: Reclaim back available bandwidth
 * @list: Tunnels are linked using this field
 * @type: Type of the tunnel
- * @max_bw: Maximum bandwidth (Mb/s) available for the tunnel (only for DP).
+ * @max_up: Maximum upstream bandwidth (Mb/s) available for the tunnel.
 *	    Only set if the bandwidth needs to be limited.
+ * @max_down: Maximum downstream bandwidth (Mb/s) available for the tunnel.
+ *	      Only set if the bandwidth needs to be limited.
+ * @allocated_up: Allocated upstream bandwidth (only for USB3) + * @allocated_down: Allocated downstream bandwidth (only for USB3) */ struct tb_tunnel { struct tb *tb; @@ -41,35 +48,56 @@ struct tb_tunnel { struct tb_path **paths; size_t npaths; int (*init)(struct tb_tunnel *tunnel); + void (*deinit)(struct tb_tunnel *tunnel); int (*activate)(struct tb_tunnel *tunnel, bool activate); - int (*consumed_bandwidth)(struct tb_tunnel *tunnel); + int (*consumed_bandwidth)(struct tb_tunnel *tunnel, int *consumed_up, + int *consumed_down); + int (*release_unused_bandwidth)(struct tb_tunnel *tunnel); + void (*reclaim_available_bandwidth)(struct tb_tunnel *tunnel, + int *available_up, + int *available_down); struct list_head list; enum tb_tunnel_type type; - unsigned int max_bw; + int max_up; + int max_down; + int allocated_up; + int allocated_down; }; -struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down); +struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down, + bool alloc_hopid); struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, struct tb_port *down); -struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in); +struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in, + bool alloc_hopid); struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, - struct tb_port *out, int max_bw); + struct tb_port *out, int link_nr, + int max_up, int max_down); struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, - struct tb_port *dst, int transmit_ring, - int transmit_path, int receive_ring, - int receive_path); -struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down); + struct tb_port *dst, int transmit_path, + int transmit_ring, int receive_path, + int receive_ring); +bool tb_tunnel_match_dma(const struct tb_tunnel *tunnel, int transmit_path, + int transmit_ring, int receive_path, int receive_ring); +struct tb_tunnel *tb_tunnel_discover_usb3(struct tb *tb, struct tb_port *down, + bool alloc_hopid); struct tb_tunnel *tb_tunnel_alloc_usb3(struct tb *tb, struct tb_port *up, - struct tb_port *down); + struct tb_port *down, int max_up, + int max_down); void tb_tunnel_free(struct tb_tunnel *tunnel); int tb_tunnel_activate(struct tb_tunnel *tunnel); int tb_tunnel_restart(struct tb_tunnel *tunnel); void tb_tunnel_deactivate(struct tb_tunnel *tunnel); bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel); -bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel, - const struct tb_switch *sw); -int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel); +bool tb_tunnel_port_on_path(const struct tb_tunnel *tunnel, + const struct tb_port *port); +int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel, int *consumed_up, + int *consumed_down); +int tb_tunnel_release_unused_bandwidth(struct tb_tunnel *tunnel); +void tb_tunnel_reclaim_available_bandwidth(struct tb_tunnel *tunnel, + int *available_up, + int *available_down); static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel) { diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index b341fc60c4ba..f986854aa207 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -10,21 +10,15 @@ #include <linux/delay.h> #include <linux/ktime.h> +#include "sb_regs.h" #include "tb.h" -#define USB4_DATA_DWORDS 16 #define USB4_DATA_RETRIES 3 -enum usb4_switch_op { - USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10, - USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11, - 
USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12, - USB4_SWITCH_OP_NVM_WRITE = 0x20, - USB4_SWITCH_OP_NVM_AUTH = 0x21, - USB4_SWITCH_OP_NVM_READ = 0x22, - USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23, - USB4_SWITCH_OP_DROM_READ = 0x24, - USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25, +enum usb4_sb_target { + USB4_SB_TARGET_ROUTER, + USB4_SB_TARGET_PARTNER, + USB4_SB_TARGET_RETIMER, }; #define USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2) @@ -42,151 +36,175 @@ enum usb4_switch_op { #define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0) -typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t); -typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t); +#define USB4_BA_LENGTH_MASK GENMASK(7, 0) +#define USB4_BA_INDEX_MASK GENMASK(15, 0) -static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, - u32 value, int timeout_msec) -{ - ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); +enum usb4_ba_index { + USB4_BA_MAX_USB3 = 0x1, + USB4_BA_MIN_DP_AUX = 0x2, + USB4_BA_MIN_DP_MAIN = 0x3, + USB4_BA_MAX_PCIE = 0x4, + USB4_BA_MAX_HI = 0x5, +}; - do { - u32 val; - int ret; +#define USB4_BA_VALUE_MASK GENMASK(31, 16) +#define USB4_BA_VALUE_SHIFT 16 + +static int usb4_native_switch_op(struct tb_switch *sw, u16 opcode, + u32 *metadata, u8 *status, + const void *tx_data, size_t tx_dwords, + void *rx_data, size_t rx_dwords) +{ + u32 val; + int ret; - ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); + if (metadata) { + ret = tb_sw_write(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); if (ret) return ret; + } + if (tx_dwords) { + ret = tb_sw_write(sw, tx_data, TB_CFG_SWITCH, ROUTER_CS_9, + tx_dwords); + if (ret) + return ret; + } - if ((val & bit) == value) - return 0; + val = opcode | ROUTER_CS_26_OV; + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (ret) + return ret; - usleep_range(50, 100); - } while (ktime_before(ktime_get(), timeout)); + ret = tb_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500); + if (ret) + return ret; - return -ETIMEDOUT; -} + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (ret) + return ret; -static int usb4_switch_op_read_data(struct tb_switch *sw, void *data, - size_t dwords) -{ - if (dwords > USB4_DATA_DWORDS) - return -EINVAL; + if (val & ROUTER_CS_26_ONS) + return -EOPNOTSUPP; + + if (status) + *status = (val & ROUTER_CS_26_STATUS_MASK) >> + ROUTER_CS_26_STATUS_SHIFT; + + if (metadata) { + ret = tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); + if (ret) + return ret; + } + if (rx_dwords) { + ret = tb_sw_read(sw, rx_data, TB_CFG_SWITCH, ROUTER_CS_9, + rx_dwords); + if (ret) + return ret; + } - return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); + return 0; } -static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data, - size_t dwords) +static int __usb4_switch_op(struct tb_switch *sw, u16 opcode, u32 *metadata, + u8 *status, const void *tx_data, size_t tx_dwords, + void *rx_data, size_t rx_dwords) { - if (dwords > USB4_DATA_DWORDS) + const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; + + if (tx_dwords > NVM_DATA_DWORDS || rx_dwords > NVM_DATA_DWORDS) return -EINVAL; - return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); + /* + * If the connection manager implementation provides USB4 router + * operation proxy callback, call it here instead of running the + * operation natively. 
+ */ + if (cm_ops->usb4_switch_op) { + int ret; + + ret = cm_ops->usb4_switch_op(sw, opcode, metadata, status, + tx_data, tx_dwords, rx_data, + rx_dwords); + if (ret != -EOPNOTSUPP) + return ret; + + /* + * If the proxy was not supported then run the native + * router operation instead. + */ + } + + return usb4_native_switch_op(sw, opcode, metadata, status, tx_data, + tx_dwords, rx_data, rx_dwords); } -static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata) +static inline int usb4_switch_op(struct tb_switch *sw, u16 opcode, + u32 *metadata, u8 *status) { - return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); + return __usb4_switch_op(sw, opcode, metadata, status, NULL, 0, NULL, 0); } -static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata) +static inline int usb4_switch_op_data(struct tb_switch *sw, u16 opcode, + u32 *metadata, u8 *status, + const void *tx_data, size_t tx_dwords, + void *rx_data, size_t rx_dwords) { - return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); + return __usb4_switch_op(sw, opcode, metadata, status, tx_data, + tx_dwords, rx_data, rx_dwords); } -static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address, - void *buf, size_t size, read_block_fn read_block) +static void usb4_switch_check_wakes(struct tb_switch *sw) { - unsigned int retries = USB4_DATA_RETRIES; - unsigned int offset; - - offset = address & 3; - address = address & ~3; - - do { - size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4); - unsigned int dwaddress, dwords; - u8 data[USB4_DATA_DWORDS * 4]; - int ret; - - dwaddress = address / 4; - dwords = ALIGN(nbytes, 4) / 4; - - ret = read_block(sw, dwaddress, data, dwords); - if (ret) { - if (ret == -ETIMEDOUT) { - if (retries--) - continue; - ret = -EIO; - } - return ret; - } - - memcpy(buf, data + offset, nbytes); + struct tb_port *port; + bool wakeup = false; + u32 val; - size -= nbytes; - address += nbytes; - buf += nbytes; - } while (size > 0); + if (!device_may_wakeup(&sw->dev)) + return; - return 0; -} + if (tb_route(sw)) { + if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1)) + return; -static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address, - const void *buf, size_t size, write_block_fn write_next_block) -{ - unsigned int retries = USB4_DATA_RETRIES; - unsigned int offset; + tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n", + (val & ROUTER_CS_6_WOPS) ? "yes" : "no", + (val & ROUTER_CS_6_WOUS) ? "yes" : "no"); - offset = address & 3; - address = address & ~3; + wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS); + } - do { - u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4); - u8 data[USB4_DATA_DWORDS * 4]; - int ret; + /* Check for any connected downstream ports for USB4 wake */ + tb_switch_for_each_port(sw, port) { + if (!tb_port_has_remote(port)) + continue; - memcpy(data + offset, buf, nbytes); + if (tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_18, 1)) + break; - ret = write_next_block(sw, data, nbytes / 4); - if (ret) { - if (ret == -ETIMEDOUT) { - if (retries--) - continue; - ret = -EIO; - } - return ret; - } + tb_port_dbg(port, "USB4 wake: %s\n", + (val & PORT_CS_18_WOU4S) ? 
"yes" : "no"); - size -= nbytes; - address += nbytes; - buf += nbytes; - } while (size > 0); + if (val & PORT_CS_18_WOU4S) + wakeup = true; + } - return 0; + if (wakeup) + pm_wakeup_event(&sw->dev, 0); } -static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) +static bool link_is_usb4(struct tb_port *port) { u32 val; - int ret; - val = opcode | ROUTER_CS_26_OV; - ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); - if (ret) - return ret; - - ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 0, 500); - if (ret) - return ret; + if (!port->cap_usb4) + return false; - ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); - if (val & ROUTER_CS_26_ONS) - return -EOPNOTSUPP; + if (tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_18, 1)) + return false; - *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT; - return 0; + return !(val & PORT_CS_18_TCM); } /** @@ -202,11 +220,14 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) */ int usb4_switch_setup(struct tb_switch *sw) { + struct tb_port *downstream_port; struct tb_switch *parent; bool tbt3, xhci; u32 val = 0; int ret; + usb4_switch_check_wakes(sw); + if (!tb_route(sw)) return 0; @@ -214,6 +235,11 @@ int usb4_switch_setup(struct tb_switch *sw) if (ret) return ret; + parent = tb_switch_parent(sw); + downstream_port = tb_port_at(tb_route(sw), parent); + sw->link_usb4 = link_is_usb4(downstream_port); + tb_sw_dbg(sw, "link: %s\n", sw->link_usb4 ? "USB4" : "TBT"); + xhci = val & ROUTER_CS_6_HCI; tbt3 = !(val & ROUTER_CS_6_TNS); @@ -224,15 +250,18 @@ int usb4_switch_setup(struct tb_switch *sw) if (ret) return ret; - parent = tb_switch_parent(sw); - - if (tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) { + if (tb_acpi_may_tunnel_usb3() && sw->link_usb4 && + tb_switch_find_port(parent, TB_TYPE_USB3_DOWN)) { val |= ROUTER_CS_5_UTO; xhci = false; } - /* Only enable PCIe tunneling if the parent router supports it */ - if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { + /* + * Only enable PCIe tunneling if the parent router supports it + * and it is not disabled. + */ + if (tb_acpi_may_tunnel_pcie() && + tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { val |= ROUTER_CS_5_PTO; /* * xHCI can be enabled if PCIe tunneling is supported @@ -252,13 +281,14 @@ int usb4_switch_setup(struct tb_switch *sw) if (ret) return ret; - return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, - ROUTER_CS_6_CR, 50); + return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, + ROUTER_CS_6_CR, 50); } /** * usb4_switch_read_uid() - Read UID from USB4 router * @sw: USB4 router + * @uid: UID is stored here * * Reads 64-bit UID from USB4 router config space. 
 */
@@ -267,10 +297,11 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid)
 	return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2);
 }
 
-static int usb4_switch_drom_read_block(struct tb_switch *sw,
+static int usb4_switch_drom_read_block(void *data,
 				       unsigned int dwaddress, void *buf,
 				       size_t dwords)
 {
+	struct tb_switch *sw = data;
 	u8 status = 0;
 	u32 metadata;
 	int ret;
@@ -279,23 +310,20 @@ static int usb4_switch_drom_read_block(struct tb_switch *sw,
 	metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) &
 		USB4_DROM_ADDRESS_MASK;
 
-	ret = usb4_switch_op_write_metadata(sw, metadata);
-	if (ret)
-		return ret;
-
-	ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status);
+	ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_DROM_READ, &metadata,
+				  &status, NULL, 0, buf, dwords);
 	if (ret)
 		return ret;
 
-	if (status)
-		return -EIO;
-
-	return usb4_switch_op_read_data(sw, buf, dwords);
+	return status ? -EIO : 0;
 }
 
 /**
  * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM
  * @sw: USB4 router
+ * @address: Byte address inside DROM to start reading
+ * @buf: Buffer where the DROM content is stored
+ * @size: Number of bytes to read from DROM
  *
  * Uses USB4 router operations to read router DROM. For devices this
  * should always work but for hosts it may return %-EOPNOTSUPP in which
@@ -304,91 +332,115 @@ static int usb4_switch_drom_read_block(struct tb_switch *sw,
 int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
 			  size_t size)
 {
-	return usb4_switch_do_read_data(sw, address, buf, size,
-					usb4_switch_drom_read_block);
+	return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES,
+				usb4_switch_drom_read_block, sw);
 }
 
-static int usb4_set_port_configured(struct tb_port *port, bool configured)
+/**
+ * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding
+ * @sw: USB4 router
+ *
+ * Checks whether conditions are met so that lane bonding can be
+ * established with the upstream router. Call only for device routers.
+ */
+bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
 {
+	struct tb_port *up;
 	int ret;
 	u32 val;
 
-	ret = tb_port_read(port, &val, TB_CFG_PORT,
-			   port->cap_usb4 + PORT_CS_19, 1);
+	up = tb_upstream_port(sw);
+	ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1);
 	if (ret)
-		return ret;
-
-	if (configured)
-		val |= PORT_CS_19_PC;
-	else
-		val &= ~PORT_CS_19_PC;
+		return false;
 
-	return tb_port_write(port, &val, TB_CFG_PORT,
-			     port->cap_usb4 + PORT_CS_19, 1);
+	return !!(val & PORT_CS_18_BE);
 }
 
 /**
- * usb4_switch_configure_link() - Set upstream USB4 link configured
+ * usb4_switch_set_wake() - Enable/disable wake
  * @sw: USB4 router
+ * @flags: Wakeup flags (%0 to disable)
  *
- * Sets the upstream USB4 link to be configured for power management
- * purposes.
+ * Enables/disables the router to wake up from sleep.
  */
-int usb4_switch_configure_link(struct tb_switch *sw)
+int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
 {
-	struct tb_port *up;
+	struct tb_port *port;
+	u64 route = tb_route(sw);
+	u32 val;
+	int ret;
 
-	if (!tb_route(sw))
-		return 0;
+	/*
+	 * Enable wakes coming from all USB4 downstream ports (from
+	 * child routers). For device routers do this also for the
+	 * upstream USB4 port.
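+	 *
+	 * (Illustrative caller note: a suspend path would typically arm,
+	 * e.g., TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT | TB_WAKE_ON_USB4
+	 * before sleep, and disarm with usb4_switch_set_wake(sw, 0) on
+	 * resume.)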
+ */ + tb_switch_for_each_port(sw, port) { + if (!tb_port_is_null(port)) + continue; + if (!route && tb_is_upstream_port(port)) + continue; + if (!port->cap_usb4) + continue; - up = tb_upstream_port(sw); - return usb4_set_port_configured(up, true); -} + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; -/** - * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration - * @sw: USB4 router - * - * Reverse of usb4_switch_configure_link(). - */ -void usb4_switch_unconfigure_link(struct tb_switch *sw) -{ - struct tb_port *up; + val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4); - if (sw->is_unplugged || !tb_route(sw)) - return; + if (tb_is_upstream_port(port)) { + val |= PORT_CS_19_WOU4; + } else { + bool configured = val & PORT_CS_19_PC; - up = tb_upstream_port(sw); - usb4_set_port_configured(up, false); -} + if ((flags & TB_WAKE_ON_CONNECT) && !configured) + val |= PORT_CS_19_WOC; + if ((flags & TB_WAKE_ON_DISCONNECT) && configured) + val |= PORT_CS_19_WOD; + if ((flags & TB_WAKE_ON_USB4) && configured) + val |= PORT_CS_19_WOU4; + } -/** - * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding - * @sw: USB4 router - * - * Checks whether conditions are met so that lane bonding can be - * established with the upstream router. Call only for device routers. - */ -bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) -{ - struct tb_port *up; - int ret; - u32 val; + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + } - up = tb_upstream_port(sw); - ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); - if (ret) - return false; + /* + * Enable wakes from PCIe, USB 3.x and DP on this router. Only + * needed for device routers. + */ + if (route) { + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; - return !!(val & PORT_CS_18_BE); + val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU | ROUTER_CS_5_WOD); + if (flags & TB_WAKE_ON_USB3) + val |= ROUTER_CS_5_WOU; + if (flags & TB_WAKE_ON_PCIE) + val |= ROUTER_CS_5_WOP; + if (flags & TB_WAKE_ON_DP) + val |= ROUTER_CS_5_WOD; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + } + + return 0; } /** * usb4_switch_set_sleep() - Prepare the router to enter sleep * @sw: USB4 router * - * Enables wakes and sets sleep bit for the router. Returns when the - * router sleep ready bit has been asserted. + * Sets sleep bit for the router. Returns when the router sleep ready + * bit has been asserted. */ int usb4_switch_set_sleep(struct tb_switch *sw) { @@ -406,8 +458,8 @@ int usb4_switch_set_sleep(struct tb_switch *sw) if (ret) return ret; - return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR, - ROUTER_CS_6_SLPR, 500); + return tb_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR, + ROUTER_CS_6_SLPR, 500); } /** @@ -424,23 +476,21 @@ int usb4_switch_nvm_sector_size(struct tb_switch *sw) u8 status; int ret; - ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status); + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &metadata, + &status); if (ret) return ret; if (status) return status == 0x2 ? 
-EOPNOTSUPP : -EIO; - ret = usb4_switch_op_read_metadata(sw, &metadata); - if (ret) - return ret; - return metadata & USB4_NVM_SECTOR_SIZE_MASK; } -static int usb4_switch_nvm_read_block(struct tb_switch *sw, +static int usb4_switch_nvm_read_block(void *data, unsigned int dwaddress, void *buf, size_t dwords) { + struct tb_switch *sw = data; u8 status = 0; u32 metadata; int ret; @@ -450,18 +500,12 @@ static int usb4_switch_nvm_read_block(struct tb_switch *sw, metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) & USB4_NVM_READ_OFFSET_MASK; - ret = usb4_switch_op_write_metadata(sw, metadata); + ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_READ, &metadata, + &status, NULL, 0, buf, dwords); if (ret) return ret; - ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status); - if (ret) - return ret; - - if (status) - return -EIO; - - return usb4_switch_op_read_data(sw, buf, dwords); + return status ? -EIO : 0; } /** @@ -477,12 +521,21 @@ static int usb4_switch_nvm_read_block(struct tb_switch *sw, int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size) { - return usb4_switch_do_read_data(sw, address, buf, size, - usb4_switch_nvm_read_block); + return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, + usb4_switch_nvm_read_block, sw); } -static int usb4_switch_nvm_set_offset(struct tb_switch *sw, - unsigned int address) +/** + * usb4_switch_nvm_set_offset() - Set NVM write offset + * @sw: USB4 router + * @address: Start offset + * + * Explicitly sets NVM write offset. Normally when writing to NVM this + * is done automatically by usb4_switch_nvm_write(). + * + * Returns %0 in success and negative errno if there was a failure. + */ +int usb4_switch_nvm_set_offset(struct tb_switch *sw, unsigned int address) { u32 metadata, dwaddress; u8 status = 0; @@ -492,28 +545,23 @@ static int usb4_switch_nvm_set_offset(struct tb_switch *sw, metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & USB4_NVM_SET_OFFSET_MASK; - ret = usb4_switch_op_write_metadata(sw, metadata); - if (ret) - return ret; - - ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status); + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &metadata, + &status); if (ret) return ret; return status ? -EIO : 0; } -static int usb4_switch_nvm_write_next_block(struct tb_switch *sw, +static int usb4_switch_nvm_write_next_block(void *data, unsigned int dwaddress, const void *buf, size_t dwords) { + struct tb_switch *sw = data; u8 status; int ret; - ret = usb4_switch_op_write_data(sw, buf, dwords); - if (ret) - return ret; - - ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status); + ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_NVM_WRITE, NULL, &status, + buf, dwords, NULL, 0); if (ret) return ret; @@ -539,8 +587,8 @@ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, if (ret) return ret; - return usb4_switch_do_write_data(sw, address, buf, size, - usb4_switch_nvm_write_next_block); + return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES, + usb4_switch_nvm_write_next_block, sw); } /** @@ -548,32 +596,219 @@ int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, * @sw: USB4 router * * After the new NVM has been written via usb4_switch_nvm_write(), this - * function triggers NVM authentication process. If the authentication - * is successful the router is power cycled and the new NVM starts + * function triggers NVM authentication process. The router gets power + * cycled and if the authentication is successful the new NVM starts * running. 
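Taken together with usb4_switch_set_wake() above, the suspend ordering these helpers imply is roughly the following (sketch; the real call site is in the connection manager code, which is not part of this excerpt, and @flags is chosen as in the earlier sketch):

	/* Arm the desired wake sources first ... */
	ret = usb4_switch_set_wake(sw, flags);
	if (ret)
		return ret;

	/* ... then set SLPR and wait for the sleep ready bit */
	return usb4_switch_set_sleep(sw);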
In case of failure returns negative errno. + * + * The caller should call usb4_switch_nvm_authenticate_status() to read + * the status of the authentication after power cycle. It should be the + * first router operation to avoid the status being lost. */ int usb4_switch_nvm_authenticate(struct tb_switch *sw) { - u8 status = 0; int ret; - ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status); + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, NULL, NULL); + switch (ret) { + /* + * The router is power cycled once NVM_AUTH is started so it is + * expected to get any of the following errors back. + */ + case -EACCES: + case -ENOTCONN: + case -ETIMEDOUT: + return 0; + + default: + return ret; + } +} + +/** + * usb4_switch_nvm_authenticate_status() - Read status of last NVM authenticate + * @sw: USB4 router + * @status: Status code of the operation + * + * The function checks if there is status available from the last NVM + * authenticate router operation. If there is status then %0 is returned + * and the status code is placed in @status. Returns negative errno in case + * of failure. + * + * Must be called before any other router operation. + */ +int usb4_switch_nvm_authenticate_status(struct tb_switch *sw, u32 *status) +{ + const struct tb_cm_ops *cm_ops = sw->tb->cm_ops; + u16 opcode; + u32 val; + int ret; + + if (cm_ops->usb4_switch_nvm_authenticate_status) { + ret = cm_ops->usb4_switch_nvm_authenticate_status(sw, status); + if (ret != -EOPNOTSUPP) + return ret; + } + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); if (ret) return ret; - switch (status) { - case 0x0: - tb_sw_dbg(sw, "NVM authentication successful\n"); - return 0; - case 0x1: - return -EINVAL; - case 0x2: - return -EAGAIN; - case 0x3: - return -EOPNOTSUPP; - default: + /* Check that the opcode is correct */ + opcode = val & ROUTER_CS_26_OPCODE_MASK; + if (opcode == USB4_SWITCH_OP_NVM_AUTH) { + if (val & ROUTER_CS_26_OV) + return -EBUSY; + if (val & ROUTER_CS_26_ONS) + return -EOPNOTSUPP; + + *status = (val & ROUTER_CS_26_STATUS_MASK) >> + ROUTER_CS_26_STATUS_SHIFT; + } else { + *status = 0; + } + + return 0; +} + +/** + * usb4_switch_credits_init() - Read buffer allocation parameters + * @sw: USB4 router + * + * Reads @sw buffer allocation parameters and initializes @sw buffer + * allocation fields accordingly. Specifically @sw->credit_allocation + * is set to %true if these parameters can be used in tunneling. + * + * Returns %0 on success and negative errno otherwise.
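Pieced together from the comments above, the router NVM upgrade sequence looks roughly like this (illustrative sketch; @buf and @size stand for a staged NVM image and are placeholders, error handling condensed):

	u32 status;

	ret = usb4_switch_nvm_write(sw, 0, buf, size);
	if (ret)
		return ret;

	/* The router power cycles here; "errors" like -ENOTCONN are expected */
	ret = usb4_switch_nvm_authenticate(sw);
	if (ret)
		return ret;

	/* Must be the first router operation after the power cycle */
	ret = usb4_switch_nvm_authenticate_status(sw, &status);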
+ */ +int usb4_switch_credits_init(struct tb_switch *sw) +{ + int max_usb3, min_dp_aux, min_dp_main, max_pcie, max_dma; + int ret, length, i, nports; + const struct tb_port *port; + u32 data[NVM_DATA_DWORDS]; + u32 metadata = 0; + u8 status = 0; + + memset(data, 0, sizeof(data)); + ret = usb4_switch_op_data(sw, USB4_SWITCH_OP_BUFFER_ALLOC, &metadata, + &status, NULL, 0, data, ARRAY_SIZE(data)); + if (ret) + return ret; + if (status) return -EIO; + + length = metadata & USB4_BA_LENGTH_MASK; + if (WARN_ON(length > ARRAY_SIZE(data))) + return -EMSGSIZE; + + max_usb3 = -1; + min_dp_aux = -1; + min_dp_main = -1; + max_pcie = -1; + max_dma = -1; + + tb_sw_dbg(sw, "credit allocation parameters:\n"); + + for (i = 0; i < length; i++) { + u16 index, value; + + index = data[i] & USB4_BA_INDEX_MASK; + value = (data[i] & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT; + + switch (index) { + case USB4_BA_MAX_USB3: + tb_sw_dbg(sw, " USB3: %u\n", value); + max_usb3 = value; + break; + case USB4_BA_MIN_DP_AUX: + tb_sw_dbg(sw, " DP AUX: %u\n", value); + min_dp_aux = value; + break; + case USB4_BA_MIN_DP_MAIN: + tb_sw_dbg(sw, " DP main: %u\n", value); + min_dp_main = value; + break; + case USB4_BA_MAX_PCIE: + tb_sw_dbg(sw, " PCIe: %u\n", value); + max_pcie = value; + break; + case USB4_BA_MAX_HI: + tb_sw_dbg(sw, " DMA: %u\n", value); + max_dma = value; + break; + default: + tb_sw_dbg(sw, " unknown credit allocation index %#x, skipping\n", + index); + break; + } + } + + /* + * Validate the buffer allocation preferences. If we find + * issues, log a warning and fall back using the hard-coded + * values. + */ + + /* Host router must report baMaxHI */ + if (!tb_route(sw) && max_dma < 0) { + tb_sw_warn(sw, "host router is missing baMaxHI\n"); + goto err_invalid; + } + + nports = 0; + tb_switch_for_each_port(sw, port) { + if (tb_port_is_null(port)) + nports++; + } + + /* Must have DP buffer allocation (multiple USB4 ports) */ + if (nports > 2 && (min_dp_aux < 0 || min_dp_main < 0)) { + tb_sw_warn(sw, "multiple USB4 ports require baMinDPaux/baMinDPmain\n"); + goto err_invalid; + } + + tb_switch_for_each_port(sw, port) { + if (tb_port_is_dpout(port) && min_dp_main < 0) { + tb_sw_warn(sw, "missing baMinDPmain"); + goto err_invalid; + } + if ((tb_port_is_dpin(port) || tb_port_is_dpout(port)) && + min_dp_aux < 0) { + tb_sw_warn(sw, "missing baMinDPaux"); + goto err_invalid; + } + if ((tb_port_is_usb3_down(port) || tb_port_is_usb3_up(port)) && + max_usb3 < 0) { + tb_sw_warn(sw, "missing baMaxUSB3"); + goto err_invalid; + } + if ((tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) && + max_pcie < 0) { + tb_sw_warn(sw, "missing baMaxPCIe"); + goto err_invalid; + } } + + /* + * Buffer allocation passed the validation so we can use it in + * path creation. 
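Each dword returned by the BUFFER_ALLOC operation packs an index/value pair, decoded by the loop above; a worked example (the concrete value 64 is invented):

	u32 dword = (64 << USB4_BA_VALUE_SHIFT) | USB4_BA_MAX_PCIE; /* invented */
	u16 index = dword & USB4_BA_INDEX_MASK;	/* == USB4_BA_MAX_PCIE */
	u16 value = (dword & USB4_BA_VALUE_MASK) >> USB4_BA_VALUE_SHIFT; /* == 64 */
	/* The loop would set max_pcie = 64, stored below in sw->max_pcie_credits */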
+ */ + sw->credit_allocation = true; + if (max_usb3 > 0) + sw->max_usb3_credits = max_usb3; + if (min_dp_aux > 0) + sw->min_dp_aux_credits = min_dp_aux; + if (min_dp_main > 0) + sw->min_dp_main_credits = min_dp_main; + if (max_pcie > 0) + sw->max_pcie_credits = max_pcie; + if (max_dma > 0) + sw->max_dma_credits = max_dma; + + return 0; + +err_invalid: + return -EINVAL; } /** @@ -587,14 +822,12 @@ int usb4_switch_nvm_authenticate(struct tb_switch *sw) */ bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) { + u32 metadata = in->port; u8 status; int ret; - ret = usb4_switch_op_write_metadata(sw, in->port); - if (ret) - return false; - - ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status); + ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &metadata, + &status); /* * If DP resource allocation is not supported assume it is * always available. @@ -619,14 +852,12 @@ bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) */ int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { + u32 metadata = in->port; u8 status; int ret; - ret = usb4_switch_op_write_metadata(sw, in->port); - if (ret) - return ret; - - ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status); + ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &metadata, + &status); if (ret == -EOPNOTSUPP) return 0; else if (ret) @@ -644,14 +875,12 @@ int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) */ int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { + u32 metadata = in->port; u8 status; int ret; - ret = usb4_switch_op_write_metadata(sw, in->port); - if (ret) - return ret; - - ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status); + ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &metadata, + &status); if (ret == -EOPNOTSUPP) return 0; else if (ret) @@ -703,7 +932,7 @@ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, if (!tb_port_is_pcie_down(p)) continue; - if (pcie_idx == usb4_idx && !tb_pci_port_is_enabled(p)) + if (pcie_idx == usb4_idx) return p; pcie_idx++; @@ -734,7 +963,7 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, if (!tb_port_is_usb3_down(p)) continue; - if (usb_idx == usb4_idx && !tb_usb3_port_is_enabled(p)) + if (usb_idx == usb4_idx) return p; usb_idx++; @@ -744,6 +973,60 @@ } /** + * usb4_switch_add_ports() - Add USB4 ports for this router + * @sw: USB4 router + * + * For a USB4 router, finds all USB4 ports and registers devices for each. + * Can be called for any router. + * + * Return %0 in case of success and negative errno in case of failure. + */ +int usb4_switch_add_ports(struct tb_switch *sw) +{ + struct tb_port *port; + + if (tb_switch_is_icm(sw) || !tb_switch_is_usb4(sw)) + return 0; + + tb_switch_for_each_port(sw, port) { + struct usb4_port *usb4; + + if (!tb_port_is_null(port)) + continue; + if (!port->cap_usb4) + continue; + + usb4 = usb4_port_device_add(port); + if (IS_ERR(usb4)) { + usb4_switch_remove_ports(sw); + return PTR_ERR(usb4); + } + + port->usb4 = usb4; + } + + return 0; +} + +/** + * usb4_switch_remove_ports() - Removes USB4 ports from this router + * @sw: USB4 router + * + * Unregisters previously registered USB4 ports.
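Typical connection manager usage of the three DP resource helpers above, when setting up a DP tunnel through a DP IN adapter @in (illustrative sketch, not part of the patch):

	if (!usb4_switch_query_dp_resource(sw, in))
		return -EBUSY;

	ret = usb4_switch_alloc_dp_resource(sw, in);
	if (ret)
		return ret;

	/* ... establish the DP tunnel; on teardown release the resource: */
	usb4_switch_dealloc_dp_resource(sw, in);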
+ */ +void usb4_switch_remove_ports(struct tb_switch *sw) +{ + struct tb_port *port; + + tb_switch_for_each_port(sw, port) { + if (port->usb4) { + usb4_port_device_remove(port->usb4); + port->usb4 = NULL; + } + } +} + +/** * usb4_port_unlock() - Unlock USB4 downstream port * @port: USB4 port to unlock * @@ -762,3 +1045,1127 @@ int usb4_port_unlock(struct tb_port *port) val &= ~ADP_CS_4_LCK; return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); } + +/** + * usb4_port_hotplug_enable() - Enables hotplug for a port + * @port: USB4 port to operate on + * + * Enables hot plug events on a given port. This is only intended + * to be used on lane, DP-IN, and DP-OUT adapters. + */ +int usb4_port_hotplug_enable(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_5, 1); + if (ret) + return ret; + + val &= ~ADP_CS_5_DHP; + return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_5, 1); +} + +static int usb4_port_set_configured(struct tb_port *port, bool configured) +{ + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + if (configured) + val |= PORT_CS_19_PC; + else + val &= ~PORT_CS_19_PC; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + +/** + * usb4_port_configure() - Set USB4 port configured + * @port: USB4 port + * + * Sets the USB4 link to be configured for power management purposes. + */ +int usb4_port_configure(struct tb_port *port) +{ + return usb4_port_set_configured(port, true); +} + +/** + * usb4_port_unconfigure() - Set USB4 port unconfigured + * @port: USB4 port + * + * Sets the USB4 link to be unconfigured for power management purposes. + */ +void usb4_port_unconfigure(struct tb_port *port) +{ + usb4_port_set_configured(port, false); +} + +static int usb4_set_xdomain_configured(struct tb_port *port, bool configured) +{ + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + if (configured) + val |= PORT_CS_19_PID; + else + val &= ~PORT_CS_19_PID; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + +/** + * usb4_port_configure_xdomain() - Configure port for XDomain + * @port: USB4 port connected to another host + * @xd: XDomain that is connected to the port + * + * Marks the USB4 port as being connected to another host and updates + * the link type. Returns %0 in success and negative errno in failure. + */ +int usb4_port_configure_xdomain(struct tb_port *port, struct tb_xdomain *xd) +{ + xd->link_usb4 = link_is_usb4(port); + return usb4_set_xdomain_configured(port, true); +} + +/** + * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain + * @port: USB4 port that was connected to another host + * + * Clears USB4 port from being marked as XDomain.
+ */ +void usb4_port_unconfigure_xdomain(struct tb_port *port) +{ + usb4_set_xdomain_configured(port, false); +} + +static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit, + u32 value, int timeout_msec) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + + do { + u32 val; + int ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, offset, 1); + if (ret) + return ret; + + if ((val & bit) == value) + return 0; + + usleep_range(50, 100); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +static int usb4_port_read_data(struct tb_port *port, void *data, size_t dwords) +{ + if (dwords > NVM_DATA_DWORDS) + return -EINVAL; + + return tb_port_read(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2, + dwords); +} + +static int usb4_port_write_data(struct tb_port *port, const void *data, + size_t dwords) +{ + if (dwords > NVM_DATA_DWORDS) + return -EINVAL; + + return tb_port_write(port, data, TB_CFG_PORT, port->cap_usb4 + PORT_CS_2, + dwords); +} + +static int usb4_port_sb_read(struct tb_port *port, enum usb4_sb_target target, + u8 index, u8 reg, void *buf, u8 size) +{ + size_t dwords = DIV_ROUND_UP(size, 4); + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + val = reg; + val |= size << PORT_CS_1_LENGTH_SHIFT; + val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK; + if (target == USB4_SB_TARGET_RETIMER) + val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT); + val |= PORT_CS_1_PND; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_1, 1); + if (ret) + return ret; + + ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1, + PORT_CS_1_PND, 0, 500); + if (ret) + return ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_1, 1); + if (ret) + return ret; + + if (val & PORT_CS_1_NR) + return -ENODEV; + if (val & PORT_CS_1_RC) + return -EIO; + + return buf ? 
usb4_port_read_data(port, buf, dwords) : 0; +} + +static int usb4_port_sb_write(struct tb_port *port, enum usb4_sb_target target, + u8 index, u8 reg, const void *buf, u8 size) +{ + size_t dwords = DIV_ROUND_UP(size, 4); + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + if (buf) { + ret = usb4_port_write_data(port, buf, dwords); + if (ret) + return ret; + } + + val = reg; + val |= size << PORT_CS_1_LENGTH_SHIFT; + val |= PORT_CS_1_WNR_WRITE; + val |= (target << PORT_CS_1_TARGET_SHIFT) & PORT_CS_1_TARGET_MASK; + if (target == USB4_SB_TARGET_RETIMER) + val |= (index << PORT_CS_1_RETIMER_INDEX_SHIFT); + val |= PORT_CS_1_PND; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_1, 1); + if (ret) + return ret; + + ret = usb4_port_wait_for_bit(port, port->cap_usb4 + PORT_CS_1, + PORT_CS_1_PND, 0, 500); + if (ret) + return ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_1, 1); + if (ret) + return ret; + + if (val & PORT_CS_1_NR) + return -ENODEV; + if (val & PORT_CS_1_RC) + return -EIO; + + return 0; +} + +static int usb4_port_sb_op(struct tb_port *port, enum usb4_sb_target target, + u8 index, enum usb4_sb_opcode opcode, int timeout_msec) +{ + ktime_t timeout; + u32 val; + int ret; + + val = opcode; + ret = usb4_port_sb_write(port, target, index, USB4_SB_OPCODE, &val, + sizeof(val)); + if (ret) + return ret; + + timeout = ktime_add_ms(ktime_get(), timeout_msec); + + do { + /* Check results */ + ret = usb4_port_sb_read(port, target, index, USB4_SB_OPCODE, + &val, sizeof(val)); + if (ret) + return ret; + + switch (val) { + case 0: + return 0; + + case USB4_SB_OPCODE_ERR: + return -EAGAIN; + + case USB4_SB_OPCODE_ONS: + return -EOPNOTSUPP; + + default: + if (val != opcode) + return -EIO; + break; + } + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +static int usb4_port_set_router_offline(struct tb_port *port, bool offline) +{ + u32 val = !offline; + int ret; + + ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_METADATA, &val, sizeof(val)); + if (ret) + return ret; + + val = USB4_SB_OPCODE_ROUTER_OFFLINE; + return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE, &val, sizeof(val)); +} + +/** + * usb4_port_router_offline() - Put the USB4 port to offline mode + * @port: USB4 port + * + * This function puts the USB4 port into offline mode. In this mode the + * port does not react to hotplug events anymore. This needs to be + * called before retimer access is done when the USB4 link is not up. + * + * Returns %0 in case of success and negative errno if there was an + * error. + */ +int usb4_port_router_offline(struct tb_port *port) +{ + return usb4_port_set_router_offline(port, true); +} + +/** + * usb4_port_router_online() - Put the USB4 port back to online + * @port: USB4 port + * + * Makes the USB4 port functional again. + */ +int usb4_port_router_online(struct tb_port *port) +{ + return usb4_port_set_router_offline(port, false); +} + +/** + * usb4_port_enumerate_retimers() - Send RT broadcast transaction + * @port: USB4 port + * + * This forces the USB4 port to send a broadcast RT transaction which + * makes the retimers on the link assign indices to themselves. Returns + * %0 in case of success and negative errno if there was an error.
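The offline/online pair above combines with retimer enumeration into the following access pattern when no USB4 link is up (sketch only; compare usb4_port_offline() in usb4_port.c later in this diff):

	ret = usb4_port_router_offline(port);
	if (ret)
		return ret;

	/* Retimers can now be reached over the sideband */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		goto out_online;

	/* ... access retimers at index 1..n ... */

out_online:
	usb4_port_router_online(port);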
+ */ +int usb4_port_enumerate_retimers(struct tb_port *port) +{ + u32 val; + + val = USB4_SB_OPCODE_ENUMERATE_RETIMERS; + return usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE, &val, sizeof(val)); +} + +/** + * usb4_port_clx_supported() - Check if CLx is supported by the link + * @port: Port to check for CLx support + * + * PORT_CS_18_CPS bit reflects if the link supports CLx including + * active cables (if connected on the link). + */ +bool usb4_port_clx_supported(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_18, 1); + if (ret) + return false; + + return !!(val & PORT_CS_18_CPS); +} + +/** + * usb4_port_margining_caps() - Read USB4 port margining capabilities + * @port: USB4 port + * @caps: Array with at least two elements to hold the results + * + * Reads the USB4 port lane margining capabilities into @caps. + */ +int usb4_port_margining_caps(struct tb_port *port, u32 *caps) +{ + int ret; + + ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE_READ_LANE_MARGINING_CAP, 500); + if (ret) + return ret; + + return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_DATA, caps, sizeof(*caps) * 2); +} + +/** + * usb4_port_hw_margin() - Run hardware lane margining on port + * @port: USB4 port + * @lanes: Which lanes to run (must match the port capabilities). Can be + * %0, %1 or %7. + * @ber_level: BER level contour value + * @timing: Perform timing margining instead of voltage + * @right_high: Use right/high margin instead of left/low + * @results: Array with at least two elements to hold the results + * + * Runs hardware lane margining on USB4 port and returns the result in + * @results. + */ +int usb4_port_hw_margin(struct tb_port *port, unsigned int lanes, + unsigned int ber_level, bool timing, bool right_high, + u32 *results) +{ + u32 val; + int ret; + + val = lanes; + if (timing) + val |= USB4_MARGIN_HW_TIME; + if (right_high) + val |= USB4_MARGIN_HW_RH; + if (ber_level) + val |= (ber_level << USB4_MARGIN_HW_BER_SHIFT) & + USB4_MARGIN_HW_BER_MASK; + + ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_METADATA, &val, sizeof(val)); + if (ret) + return ret; + + ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE_RUN_HW_LANE_MARGINING, 2500); + if (ret) + return ret; + + return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_DATA, results, sizeof(*results) * 2); +} + +/** + * usb4_port_sw_margin() - Run software lane margining on port + * @port: USB4 port + * @lanes: Which lanes to run (must match the port capabilities). Can be + * %0, %1 or %7. + * @timing: Perform timing margining instead of voltage + * @right_high: Use right/high margin instead of left/low + * @counter: What to do with the error counter + * + * Runs software lane margining on USB4 port. Read back the error + * counters by calling usb4_port_sw_margin_errors(). Returns %0 in + * success and negative errno otherwise.
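A hardware margining run built from the two helpers above (illustrative; the lane and BER level values are arbitrary examples):

	u32 caps[2], results[2];

	ret = usb4_port_margining_caps(port, caps);
	if (ret)
		return ret;

	/* Lane 0, BER level 3, timing margining on the right/high side */
	ret = usb4_port_hw_margin(port, 0, 3, true, true, results);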
+ */ +int usb4_port_sw_margin(struct tb_port *port, unsigned int lanes, bool timing, + bool right_high, u32 counter) +{ + u32 val; + int ret; + + val = lanes; + if (timing) + val |= USB4_MARGIN_SW_TIME; + if (right_high) + val |= USB4_MARGIN_SW_RH; + val |= (counter << USB4_MARGIN_SW_COUNTER_SHIFT) & + USB4_MARGIN_SW_COUNTER_MASK; + + ret = usb4_port_sb_write(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_METADATA, &val, sizeof(val)); + if (ret) + return ret; + + return usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE_RUN_SW_LANE_MARGINING, 2500); +} + +/** + * usb4_port_sw_margin_errors() - Read the software margining error counters + * @port: USB4 port + * @errors: Error metadata is copied here. + * + * This reads back the software margining error counters from the port. + * Returns %0 in success and negative errno otherwise. + */ +int usb4_port_sw_margin_errors(struct tb_port *port, u32 *errors) +{ + int ret; + + ret = usb4_port_sb_op(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_OPCODE_READ_SW_MARGIN_ERR, 150); + if (ret) + return ret; + + return usb4_port_sb_read(port, USB4_SB_TARGET_ROUTER, 0, + USB4_SB_METADATA, errors, sizeof(*errors)); +} + +static inline int usb4_port_retimer_op(struct tb_port *port, u8 index, + enum usb4_sb_opcode opcode, + int timeout_msec) +{ + return usb4_port_sb_op(port, USB4_SB_TARGET_RETIMER, index, opcode, + timeout_msec); +} + +/** + * usb4_port_retimer_set_inbound_sbtx() - Enable sideband channel transactions + * @port: USB4 port + * @index: Retimer index + * + * Enables sideband channel transactions on SBTX. Can be used when the + * USB4 link does not go up, for example if there is no device connected. + */ +int usb4_port_retimer_set_inbound_sbtx(struct tb_port *port, u8 index) +{ + int ret; + + ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX, + 500); + + if (ret != -ENODEV) + return ret; + + /* + * Per the USB4 retimer spec, the retimer is not required to + * send an RT (Retimer Transaction) response for the first + * SET_INBOUND_SBTX command + */ + return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_SET_INBOUND_SBTX, + 500); +} + +/** + * usb4_port_retimer_read() - Read from retimer sideband registers + * @port: USB4 port + * @index: Retimer index + * @reg: Sideband register to read + * @buf: Data from @reg is stored here + * @size: Number of bytes to read + * + * Function reads retimer sideband registers starting from @reg. The + * retimer is connected to @port at @index. Returns %0 in case of + * success, and read data is copied to @buf. If there is no retimer + * present at given @index returns %-ENODEV. In any other failure + * returns negative errno. + */ +int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, + u8 size) +{ + return usb4_port_sb_read(port, USB4_SB_TARGET_RETIMER, index, reg, buf, + size); +} + +/** + * usb4_port_retimer_write() - Write to retimer sideband registers + * @port: USB4 port + * @index: Retimer index + * @reg: Sideband register to write + * @buf: Data that is written starting from @reg + * @size: Number of bytes to write + * + * Writes retimer sideband registers starting from @reg. The retimer is + * connected to @port at @index. Returns %0 in case of success. If there + * is no retimer present at given @index returns %-ENODEV. In any other + * failure returns negative errno.
+ */ +int usb4_port_retimer_write(struct tb_port *port, u8 index, u8 reg, + const void *buf, u8 size) +{ + return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, reg, buf, + size); +} + +/** + * usb4_port_retimer_is_last() - Is the retimer the last on-board retimer + * @port: USB4 port + * @index: Retimer index + * + * If the retimer at @index is the last one (connected directly to the + * Type-C port) this function returns %1. If it is not, returns %0. If + * the retimer is not present returns %-ENODEV. Otherwise returns + * negative errno. + */ +int usb4_port_retimer_is_last(struct tb_port *port, u8 index) +{ + u32 metadata; + int ret; + + ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_QUERY_LAST_RETIMER, + 500); + if (ret) + return ret; + + ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata, + sizeof(metadata)); + return ret ? ret : metadata & 1; +} + +/** + * usb4_port_retimer_nvm_sector_size() - Read retimer NVM sector size + * @port: USB4 port + * @index: Retimer index + * + * Reads NVM sector size (in bytes) of a retimer at @index. This + * operation can be used to determine whether the retimer supports NVM + * upgrade for example. Returns sector size in bytes or negative errno + * in case of error. Specifically returns %-ENODEV if there is no + * retimer at @index. + */ +int usb4_port_retimer_nvm_sector_size(struct tb_port *port, u8 index) +{ + u32 metadata; + int ret; + + ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_GET_NVM_SECTOR_SIZE, + 500); + if (ret) + return ret; + + ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, &metadata, + sizeof(metadata)); + return ret ? ret : metadata & USB4_NVM_SECTOR_SIZE_MASK; +} + +/** + * usb4_port_retimer_nvm_set_offset() - Set NVM write offset + * @port: USB4 port + * @index: Retimer index + * @address: Start offset + * + * Explicitly sets NVM write offset. Normally when writing to NVM this is + * done automatically by usb4_port_retimer_nvm_write(). + * + * Returns %0 in success and negative errno if there was a failure. + */ +int usb4_port_retimer_nvm_set_offset(struct tb_port *port, u8 index, + unsigned int address) +{ + u32 metadata, dwaddress; + int ret; + + dwaddress = address / 4; + metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & + USB4_NVM_SET_OFFSET_MASK; + + ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata, + sizeof(metadata)); + if (ret) + return ret; + + return usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_SET_OFFSET, + 500); +} + +struct retimer_info { + struct tb_port *port; + u8 index; +}; + +static int usb4_port_retimer_nvm_write_next_block(void *data, + unsigned int dwaddress, const void *buf, size_t dwords) + +{ + const struct retimer_info *info = data; + struct tb_port *port = info->port; + u8 index = info->index; + int ret; + + ret = usb4_port_retimer_write(port, index, USB4_SB_DATA, + buf, dwords * 4); + if (ret) + return ret; + + return usb4_port_retimer_op(port, index, + USB4_SB_OPCODE_NVM_BLOCK_WRITE, 1000); +} + +/** + * usb4_port_retimer_nvm_write() - Write to retimer NVM + * @port: USB4 port + * @index: Retimer index + * @address: Byte address where to start the write + * @buf: Data to write + * @size: Number of bytes to write + * + * Writes @size bytes from @buf to the retimer NVM. Used for NVM + * upgrade. Returns %0 if the data was written successfully and negative + * errno in case of failure. Specifically returns %-ENODEV if there is + * no retimer at @index.
+ */ +int usb4_port_retimer_nvm_write(struct tb_port *port, u8 index, unsigned int address, + const void *buf, size_t size) +{ + struct retimer_info info = { .port = port, .index = index }; + int ret; + + ret = usb4_port_retimer_nvm_set_offset(port, index, address); + if (ret) + return ret; + + return tb_nvm_write_data(address, buf, size, USB4_DATA_RETRIES, + usb4_port_retimer_nvm_write_next_block, &info); +} + +/** + * usb4_port_retimer_nvm_authenticate() - Start retimer NVM upgrade + * @port: USB4 port + * @index: Retimer index + * + * After the new NVM image has been written via usb4_port_retimer_nvm_write() + * this function can be used to trigger the NVM upgrade process. If + * successful the retimer restarts with the new NVM and may not have the + * index set so one needs to call usb4_port_enumerate_retimers() to + * force index to be assigned. + */ +int usb4_port_retimer_nvm_authenticate(struct tb_port *port, u8 index) +{ + u32 val; + + /* + * We need to use the raw operation here because once the + * authentication completes the retimer index is not set anymore + * so we do not get back the status now. + */ + val = USB4_SB_OPCODE_NVM_AUTH_WRITE; + return usb4_port_sb_write(port, USB4_SB_TARGET_RETIMER, index, + USB4_SB_OPCODE, &val, sizeof(val)); +} + +/** + * usb4_port_retimer_nvm_authenticate_status() - Read status of NVM upgrade + * @port: USB4 port + * @index: Retimer index + * @status: Raw status code read from metadata + * + * This can be called after usb4_port_retimer_nvm_authenticate() and + * usb4_port_enumerate_retimers() to fetch status of the NVM upgrade. + * + * Returns %0 if the authentication status was successfully read. The + * completion metadata (the result) is then stored into @status. If + * reading the status fails, returns negative errno. + */ +int usb4_port_retimer_nvm_authenticate_status(struct tb_port *port, u8 index, + u32 *status) +{ + u32 metadata, val; + int ret; + + ret = usb4_port_retimer_read(port, index, USB4_SB_OPCODE, &val, + sizeof(val)); + if (ret) + return ret; + + switch (val) { + case 0: + *status = 0; + return 0; + + case USB4_SB_OPCODE_ERR: + ret = usb4_port_retimer_read(port, index, USB4_SB_METADATA, + &metadata, sizeof(metadata)); + if (ret) + return ret; + + *status = metadata & USB4_SB_METADATA_NVM_AUTH_WRITE_MASK; + return 0; + + case USB4_SB_OPCODE_ONS: + return -EOPNOTSUPP; + + default: + return -EIO; + } +} + +static int usb4_port_retimer_nvm_read_block(void *data, unsigned int dwaddress, + void *buf, size_t dwords) +{ + const struct retimer_info *info = data; + struct tb_port *port = info->port; + u8 index = info->index; + u32 metadata; + int ret; + + metadata = dwaddress << USB4_NVM_READ_OFFSET_SHIFT; + if (dwords < NVM_DATA_DWORDS) + metadata |= dwords << USB4_NVM_READ_LENGTH_SHIFT; + + ret = usb4_port_retimer_write(port, index, USB4_SB_METADATA, &metadata, + sizeof(metadata)); + if (ret) + return ret; + + ret = usb4_port_retimer_op(port, index, USB4_SB_OPCODE_NVM_READ, 500); + if (ret) + return ret; + + return usb4_port_retimer_read(port, index, USB4_SB_DATA, buf, + dwords * 4); +} + +/** + * usb4_port_retimer_nvm_read() - Read contents of retimer NVM + * @port: USB4 port + * @index: Retimer index + * @address: NVM address (in bytes) to start reading + * @buf: Data read from NVM is stored here + * @size: Number of bytes to read + * + * Reads retimer NVM and copies the contents to @buf. Returns %0 if the + * read was successful and negative errno in case of failure. 
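The retimer NVM upgrade flow assembled from the helpers above (sketch only; @image, @image_size and @index are placeholders for a staged image and an enumerated retimer, @status is a u32):

	ret = usb4_port_retimer_set_inbound_sbtx(port, index);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_write(port, index, 0, image, image_size);
	if (ret)
		return ret;

	ret = usb4_port_retimer_nvm_authenticate(port, index);
	if (ret)
		return ret;

	/* The retimer restarts; reassign indices, then fetch the result */
	ret = usb4_port_enumerate_retimers(port);
	if (!ret)
		ret = usb4_port_retimer_nvm_authenticate_status(port, index,
								&status);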
+ * Specifically returns %-ENODEV if there is no retimer at @index. + */ +int usb4_port_retimer_nvm_read(struct tb_port *port, u8 index, + unsigned int address, void *buf, size_t size) +{ + struct retimer_info info = { .port = port, .index = index }; + + return tb_nvm_read_data(address, buf, size, USB4_DATA_RETRIES, + usb4_port_retimer_nvm_read_block, &info); +} + +/** + * usb4_usb3_port_max_link_rate() - Maximum supported USB3 link rate + * @port: USB3 adapter port + * + * Return maximum supported link rate of a USB3 adapter in Mb/s. + * Negative errno in case of error. + */ +int usb4_usb3_port_max_link_rate(struct tb_port *port) +{ + int ret, lr; + u32 val; + + if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port)) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_4, 1); + if (ret) + return ret; + + lr = (val & ADP_USB3_CS_4_MSLR_MASK) >> ADP_USB3_CS_4_MSLR_SHIFT; + return lr == ADP_USB3_CS_4_MSLR_20G ? 20000 : 10000; +} + +/** + * usb4_usb3_port_actual_link_rate() - Established USB3 link rate + * @port: USB3 adapter port + * + * Return actual established link rate of a USB3 adapter in Mb/s. If the + * link is not up, returns %0 and negative errno in case of failure. + */ +int usb4_usb3_port_actual_link_rate(struct tb_port *port) +{ + int ret, lr; + u32 val; + + if (!tb_port_is_usb3_down(port) && !tb_port_is_usb3_up(port)) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_4, 1); + if (ret) + return ret; + + if (!(val & ADP_USB3_CS_4_ULV)) + return 0; + + lr = val & ADP_USB3_CS_4_ALR_MASK; + return lr == ADP_USB3_CS_4_ALR_20G ? 20000 : 10000; +} + +static int usb4_usb3_port_cm_request(struct tb_port *port, bool request) +{ + int ret; + u32 val; + + if (!tb_port_is_usb3_down(port)) + return -EINVAL; + if (tb_route(port->sw)) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); + if (ret) + return ret; + + if (request) + val |= ADP_USB3_CS_2_CMR; + else + val &= ~ADP_USB3_CS_2_CMR; + + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); + if (ret) + return ret; + + /* + * We can use val here directly as the CMR bit is in the same place + * as HCA. Just mask out others.
+ */ + val &= ADP_USB3_CS_2_CMR; + return usb4_port_wait_for_bit(port, port->cap_adap + ADP_USB3_CS_1, + ADP_USB3_CS_1_HCA, val, 1500); +} + +static inline int usb4_usb3_port_set_cm_request(struct tb_port *port) +{ + return usb4_usb3_port_cm_request(port, true); +} + +static inline int usb4_usb3_port_clear_cm_request(struct tb_port *port) +{ + return usb4_usb3_port_cm_request(port, false); +} + +static unsigned int usb3_bw_to_mbps(u32 bw, u8 scale) +{ + unsigned long uframes; + + uframes = bw * 512UL << scale; + return DIV_ROUND_CLOSEST(uframes * 8000, 1000 * 1000); +} + +static u32 mbps_to_usb3_bw(unsigned int mbps, u8 scale) +{ + unsigned long uframes; + + /* 1 uframe is 1/8 ms (125 us) -> 1 / 8000 s */ + uframes = ((unsigned long)mbps * 1000 * 1000) / 8000; + return DIV_ROUND_UP(uframes, 512UL << scale); +} + +static int usb4_usb3_port_read_allocated_bandwidth(struct tb_port *port, + int *upstream_bw, + int *downstream_bw) +{ + u32 val, bw, scale; + int ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); + if (ret) + return ret; + + ret = tb_port_read(port, &scale, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_3, 1); + if (ret) + return ret; + + scale &= ADP_USB3_CS_3_SCALE_MASK; + + bw = val & ADP_USB3_CS_2_AUBW_MASK; + *upstream_bw = usb3_bw_to_mbps(bw, scale); + + bw = (val & ADP_USB3_CS_2_ADBW_MASK) >> ADP_USB3_CS_2_ADBW_SHIFT; + *downstream_bw = usb3_bw_to_mbps(bw, scale); + + return 0; +} + +/** + * usb4_usb3_port_allocated_bandwidth() - Bandwidth allocated for USB3 + * @port: USB3 adapter port + * @upstream_bw: Allocated upstream bandwidth is stored here + * @downstream_bw: Allocated downstream bandwidth is stored here + * + * Stores currently allocated USB3 bandwidth into @upstream_bw and + * @downstream_bw in Mb/s. Returns %0 in case of success and negative + * errno in failure. 
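A quick numeric check of the two conversion helpers above: with scale %0 one register unit corresponds to 512 * 8000 / 10^6 = 4.096 Mb/s, so for example:

	mbps_to_usb3_bw(10000, 0); /* DIV_ROUND_UP(10000 * 10^6 / 8000, 512) = 2442 */
	usb3_bw_to_mbps(2442, 0);  /* DIV_ROUND_CLOSEST(2442 * 512 * 8000, 10^6) = 10002 */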
+ */ +int usb4_usb3_port_allocated_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw) +{ + int ret; + + ret = usb4_usb3_port_set_cm_request(port); + if (ret) + return ret; + + ret = usb4_usb3_port_read_allocated_bandwidth(port, upstream_bw, + downstream_bw); + usb4_usb3_port_clear_cm_request(port); + + return ret; +} + +static int usb4_usb3_port_read_consumed_bandwidth(struct tb_port *port, + int *upstream_bw, + int *downstream_bw) +{ + u32 val, bw, scale; + int ret; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_1, 1); + if (ret) + return ret; + + ret = tb_port_read(port, &scale, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_3, 1); + if (ret) + return ret; + + scale &= ADP_USB3_CS_3_SCALE_MASK; + + bw = val & ADP_USB3_CS_1_CUBW_MASK; + *upstream_bw = usb3_bw_to_mbps(bw, scale); + + bw = (val & ADP_USB3_CS_1_CDBW_MASK) >> ADP_USB3_CS_1_CDBW_SHIFT; + *downstream_bw = usb3_bw_to_mbps(bw, scale); + + return 0; +} + +static int usb4_usb3_port_write_allocated_bandwidth(struct tb_port *port, + int upstream_bw, + int downstream_bw) +{ + u32 val, ubw, dbw, scale; + int ret; + + /* Read the used scale, hardware default is 0 */ + ret = tb_port_read(port, &scale, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_3, 1); + if (ret) + return ret; + + scale &= ADP_USB3_CS_3_SCALE_MASK; + ubw = mbps_to_usb3_bw(upstream_bw, scale); + dbw = mbps_to_usb3_bw(downstream_bw, scale); + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); + if (ret) + return ret; + + val &= ~(ADP_USB3_CS_2_AUBW_MASK | ADP_USB3_CS_2_ADBW_MASK); + val |= dbw << ADP_USB3_CS_2_ADBW_SHIFT; + val |= ubw; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_2, 1); +} + +/** + * usb4_usb3_port_allocate_bandwidth() - Allocate bandwidth for USB3 + * @port: USB3 adapter port + * @upstream_bw: New upstream bandwidth + * @downstream_bw: New downstream bandwidth + * + * This can be used to set how much bandwidth is allocated for the USB3 + * tunneled isochronous traffic. @upstream_bw and @downstream_bw are the + * new values programmed to the USB3 adapter allocation registers. If + * the values are lower than what is currently consumed the allocation + * is set to what is currently consumed instead (consumed bandwidth + * cannot be taken away by CM). The actual new values are returned in + * @upstream_bw and @downstream_bw. + * + * Returns %0 in case of success and negative errno if there was a + * failure. 
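Usage sketch for the allocation helper just documented, paired with the release helper that follows it (illustrative; the 5000 Mb/s figures are arbitrary):

	int up_bw = 5000, down_bw = 5000;

	/* Ask for 5 Gb/s each way; granted values come back in the args */
	ret = usb4_usb3_port_allocate_bandwidth(port, &up_bw, &down_bw);
	if (ret)
		return ret;

	/* ... later, shrink back to what is actually consumed ... */
	ret = usb4_usb3_port_release_bandwidth(port, &up_bw, &down_bw);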
+ */ +int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw) +{ + int ret, consumed_up, consumed_down, allocate_up, allocate_down; + + ret = usb4_usb3_port_set_cm_request(port); + if (ret) + return ret; + + ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up, + &consumed_down); + if (ret) + goto err_request; + + /* Don't allow it go lower than what is consumed */ + allocate_up = max(*upstream_bw, consumed_up); + allocate_down = max(*downstream_bw, consumed_down); + + ret = usb4_usb3_port_write_allocated_bandwidth(port, allocate_up, + allocate_down); + if (ret) + goto err_request; + + *upstream_bw = allocate_up; + *downstream_bw = allocate_down; + +err_request: + usb4_usb3_port_clear_cm_request(port); + return ret; +} + +/** + * usb4_usb3_port_release_bandwidth() - Release allocated USB3 bandwidth + * @port: USB3 adapter port + * @upstream_bw: New allocated upstream bandwidth + * @downstream_bw: New allocated downstream bandwidth + * + * Releases USB3 allocated bandwidth down to what is actually consumed. + * The new bandwidth is returned in @upstream_bw and @downstream_bw. + * + * Returns %0 in case of success and negative errno in case of failure. + */ +int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, + int *downstream_bw) +{ + int ret, consumed_up, consumed_down; + + ret = usb4_usb3_port_set_cm_request(port); + if (ret) + return ret; + + ret = usb4_usb3_port_read_consumed_bandwidth(port, &consumed_up, + &consumed_down); + if (ret) + goto err_request; + + /* + * Always keep 1000 Mb/s to make sure xHCI has at least some + * bandwidth available for isochronous traffic. + */ + if (consumed_up < 1000) + consumed_up = 1000; + if (consumed_down < 1000) + consumed_down = 1000; + + ret = usb4_usb3_port_write_allocated_bandwidth(port, consumed_up, + consumed_down); + if (ret) + goto err_request; + + *upstream_bw = consumed_up; + *downstream_bw = consumed_down; + +err_request: + usb4_usb3_port_clear_cm_request(port); + return ret; +} diff --git a/drivers/thunderbolt/usb4_port.c b/drivers/thunderbolt/usb4_port.c new file mode 100644 index 000000000000..1a30c0a23286 --- /dev/null +++ b/drivers/thunderbolt/usb4_port.c @@ -0,0 +1,320 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USB4 port device + * + * Copyright (C) 2021, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/pm_runtime.h> +#include <linux/component.h> +#include <linux/property.h> + +#include "tb.h" + +static int connector_bind(struct device *dev, struct device *connector, void *data) +{ + int ret; + + ret = sysfs_create_link(&dev->kobj, &connector->kobj, "connector"); + if (ret) + return ret; + + ret = sysfs_create_link(&connector->kobj, &dev->kobj, dev_name(dev)); + if (ret) + sysfs_remove_link(&dev->kobj, "connector"); + + return ret; +} + +static void connector_unbind(struct device *dev, struct device *connector, void *data) +{ + sysfs_remove_link(&connector->kobj, dev_name(dev)); + sysfs_remove_link(&dev->kobj, "connector"); +} + +static const struct component_ops connector_ops = { + .bind = connector_bind, + .unbind = connector_unbind, +}; + +static ssize_t link_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + struct tb_port *port = usb4->port; + struct tb *tb = port->sw->tb; + const char *link; + + if (mutex_lock_interruptible(&tb->lock)) + return -ERESTARTSYS; + + if (tb_is_upstream_port(port)) + link = 
port->sw->link_usb4 ? "usb4" : "tbt"; + else if (tb_port_has_remote(port)) + link = port->remote->sw->link_usb4 ? "usb4" : "tbt"; + else if (port->xdomain) + link = port->xdomain->link_usb4 ? "usb4" : "tbt"; + else + link = "none"; + + mutex_unlock(&tb->lock); + + return sysfs_emit(buf, "%s\n", link); +} +static DEVICE_ATTR_RO(link); + +static struct attribute *common_attrs[] = { + &dev_attr_link.attr, + NULL +}; + +static const struct attribute_group common_group = { + .attrs = common_attrs, +}; + +static int usb4_port_offline(struct usb4_port *usb4) +{ + struct tb_port *port = usb4->port; + int ret; + + ret = tb_acpi_power_on_retimers(port); + if (ret) + return ret; + + ret = usb4_port_router_offline(port); + if (ret) { + tb_acpi_power_off_retimers(port); + return ret; + } + + ret = tb_retimer_scan(port, false); + if (ret) { + usb4_port_router_online(port); + tb_acpi_power_off_retimers(port); + } + + return ret; +} + +static void usb4_port_online(struct usb4_port *usb4) +{ + struct tb_port *port = usb4->port; + + usb4_port_router_online(port); + tb_acpi_power_off_retimers(port); +} + +static ssize_t offline_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + + return sysfs_emit(buf, "%d\n", usb4->offline); +} + +static ssize_t offline_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + struct tb_port *port = usb4->port; + struct tb *tb = port->sw->tb; + bool val; + int ret; + + ret = kstrtobool(buf, &val); + if (ret) + return ret; + + pm_runtime_get_sync(&usb4->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm; + } + + if (val == usb4->offline) + goto out_unlock; + + /* Offline mode works only for ports that are not connected */ + if (tb_port_has_remote(port)) { + ret = -EBUSY; + goto out_unlock; + } + + if (val) { + ret = usb4_port_offline(usb4); + if (ret) + goto out_unlock; + } else { + usb4_port_online(usb4); + tb_retimer_remove_all(port); + } + + usb4->offline = val; + tb_port_dbg(port, "%s offline mode\n", val ? "enter" : "exit"); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm: + pm_runtime_mark_last_busy(&usb4->dev); + pm_runtime_put_autosuspend(&usb4->dev); + + return ret ? ret : count; +} +static DEVICE_ATTR_RW(offline); + +static ssize_t rescan_store(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count) +{ + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + struct tb_port *port = usb4->port; + struct tb *tb = port->sw->tb; + bool val; + int ret; + + ret = kstrtobool(buf, &val); + if (ret) + return ret; + + if (!val) + return count; + + pm_runtime_get_sync(&usb4->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm; + } + + /* Must be in offline mode already */ + if (!usb4->offline) { + ret = -EINVAL; + goto out_unlock; + } + + tb_retimer_remove_all(port); + ret = tb_retimer_scan(port, true); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm: + pm_runtime_mark_last_busy(&usb4->dev); + pm_runtime_put_autosuspend(&usb4->dev); + + return ret ? 
ret : count; +} +static DEVICE_ATTR_WO(rescan); + +static struct attribute *service_attrs[] = { + &dev_attr_offline.attr, + &dev_attr_rescan.attr, + NULL +}; + +static umode_t service_attr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct usb4_port *usb4 = tb_to_usb4_port_device(dev); + + /* + * Always need some platform help to cycle the modes so that + * retimers can be accessed through the sideband. + */ + return usb4->can_offline ? attr->mode : 0; +} + +static const struct attribute_group service_group = { + .attrs = service_attrs, + .is_visible = service_attr_is_visible, +}; + +static const struct attribute_group *usb4_port_device_groups[] = { + &common_group, + &service_group, + NULL +}; + +static void usb4_port_device_release(struct device *dev) +{ + struct usb4_port *usb4 = container_of(dev, struct usb4_port, dev); + + kfree(usb4); +} + +struct device_type usb4_port_device_type = { + .name = "usb4_port", + .groups = usb4_port_device_groups, + .release = usb4_port_device_release, +}; + +/** + * usb4_port_device_add() - Add USB4 port device + * @port: Lane 0 adapter port to add the USB4 port + * + * Creates and registers a USB4 port device for @port. Returns the new + * USB4 port device pointer or ERR_PTR() in case of error. + */ +struct usb4_port *usb4_port_device_add(struct tb_port *port) +{ + struct usb4_port *usb4; + int ret; + + usb4 = kzalloc(sizeof(*usb4), GFP_KERNEL); + if (!usb4) + return ERR_PTR(-ENOMEM); + + usb4->port = port; + usb4->dev.type = &usb4_port_device_type; + usb4->dev.parent = &port->sw->dev; + dev_set_name(&usb4->dev, "usb4_port%d", port->port); + + ret = device_register(&usb4->dev); + if (ret) { + put_device(&usb4->dev); + return ERR_PTR(ret); + } + + if (dev_fwnode(&usb4->dev)) { + ret = component_add(&usb4->dev, &connector_ops); + if (ret) { + dev_err(&usb4->dev, "failed to add component\n"); + device_unregister(&usb4->dev); + } + } + + pm_runtime_no_callbacks(&usb4->dev); + pm_runtime_set_active(&usb4->dev); + pm_runtime_enable(&usb4->dev); + pm_runtime_set_autosuspend_delay(&usb4->dev, TB_AUTOSUSPEND_DELAY); + pm_runtime_mark_last_busy(&usb4->dev); + pm_runtime_use_autosuspend(&usb4->dev); + + return usb4; +} + +/** + * usb4_port_device_remove() - Removes USB4 port device + * @usb4: USB4 port device + * + * Unregisters the USB4 port device from the system. The device will be + * released when the last reference is dropped. + */ +void usb4_port_device_remove(struct usb4_port *usb4) +{ + if (dev_fwnode(&usb4->dev)) + component_del(&usb4->dev, &connector_ops); + device_unregister(&usb4->dev); +} + +/** + * usb4_port_device_resume() - Resumes USB4 port device + * @usb4: USB4 port device + * + * Used to resume USB4 port device after sleep state. + */ +int usb4_port_device_resume(struct usb4_port *usb4) +{ + return usb4->offline ? 
usb4_port_offline(usb4) : 0; +} diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 053f918e00e8..f00b2f62d8e3 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -8,19 +8,48 @@ */ #include <linux/device.h> +#include <linux/delay.h> #include <linux/kmod.h> #include <linux/module.h> #include <linux/pm_runtime.h> +#include <linux/prandom.h> #include <linux/utsname.h> #include <linux/uuid.h> #include <linux/workqueue.h> #include "tb.h" -#define XDOMAIN_DEFAULT_TIMEOUT 5000 /* ms */ -#define XDOMAIN_UUID_RETRIES 10 -#define XDOMAIN_PROPERTIES_RETRIES 60 -#define XDOMAIN_PROPERTIES_CHANGED_RETRIES 10 +#define XDOMAIN_SHORT_TIMEOUT 100 /* ms */ +#define XDOMAIN_DEFAULT_TIMEOUT 1000 /* ms */ +#define XDOMAIN_BONDING_TIMEOUT 10000 /* ms */ +#define XDOMAIN_RETRIES 10 +#define XDOMAIN_DEFAULT_MAX_HOPID 15 + +enum { + XDOMAIN_STATE_INIT, + XDOMAIN_STATE_UUID, + XDOMAIN_STATE_LINK_STATUS, + XDOMAIN_STATE_LINK_STATE_CHANGE, + XDOMAIN_STATE_LINK_STATUS2, + XDOMAIN_STATE_BONDING_UUID_LOW, + XDOMAIN_STATE_BONDING_UUID_HIGH, + XDOMAIN_STATE_PROPERTIES, + XDOMAIN_STATE_ENUMERATED, + XDOMAIN_STATE_ERROR, +}; + +static const char * const state_names[] = { + [XDOMAIN_STATE_INIT] = "INIT", + [XDOMAIN_STATE_UUID] = "UUID", + [XDOMAIN_STATE_LINK_STATUS] = "LINK_STATUS", + [XDOMAIN_STATE_LINK_STATE_CHANGE] = "LINK_STATE_CHANGE", + [XDOMAIN_STATE_LINK_STATUS2] = "LINK_STATUS2", + [XDOMAIN_STATE_BONDING_UUID_LOW] = "BONDING_UUID_LOW", + [XDOMAIN_STATE_BONDING_UUID_HIGH] = "BONDING_UUID_HIGH", + [XDOMAIN_STATE_PROPERTIES] = "PROPERTIES", + [XDOMAIN_STATE_ENUMERATED] = "ENUMERATED", + [XDOMAIN_STATE_ERROR] = "ERROR", +}; struct xdomain_request_work { struct work_struct work; @@ -28,13 +57,19 @@ struct xdomain_request_work { struct tb *tb; }; -/* Serializes access to the properties and protocol handlers below */ +static bool tb_xdomain_enabled = true; +module_param_named(xdomain, tb_xdomain_enabled, bool, 0444); +MODULE_PARM_DESC(xdomain, "allow XDomain protocol (default: true)"); + +/* + * Serializes access to the properties and protocol handlers below. If + * you need to take both this lock and the struct tb_xdomain lock, take + * this one first. 
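The documented ordering as a sketch (update_property_block() further below follows exactly this nesting):

	mutex_lock(&xdomain_lock);
	mutex_lock(&xd->lock);
	/* ... touch both the global properties and per-domain state ... */
	mutex_unlock(&xd->lock);
	mutex_unlock(&xdomain_lock);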
+ */ static DEFINE_MUTEX(xdomain_lock); /* Properties exposed to the remote domains */ static struct tb_property_dir *xdomain_property_dir; -static u32 *xdomain_property_block; -static u32 xdomain_property_block_len; static u32 xdomain_property_block_gen; /* Additional protocol handlers */ @@ -45,6 +80,11 @@ static const uuid_t tb_xdp_uuid = UUID_INIT(0xb638d70e, 0x42ff, 0x40bb, 0x97, 0xc2, 0x90, 0xe2, 0xc0, 0xb2, 0xff, 0x07); +bool tb_is_xdomain_enabled(void) +{ + return tb_xdomain_enabled && tb_acpi_is_xdomain_allowed(); +} + static bool tb_xdomain_match(const struct tb_cfg_request *req, const struct ctl_pkg *pkg) { @@ -199,16 +239,12 @@ static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route, memcpy(&hdr->uuid, &tb_xdp_uuid, sizeof(tb_xdp_uuid)); } -static int tb_xdp_handle_error(const struct tb_xdp_header *hdr) +static int tb_xdp_handle_error(const struct tb_xdp_error_response *res) { - const struct tb_xdp_error_response *error; - - if (hdr->type != ERROR_RESPONSE) + if (res->hdr.type != ERROR_RESPONSE) return 0; - error = (const struct tb_xdp_error_response *)hdr; - - switch (error->error) { + switch (res->error) { case ERROR_UNKNOWN_PACKET: case ERROR_UNKNOWN_DOMAIN: return -EIO; @@ -224,7 +260,7 @@ static int tb_xdp_handle_error(const struct tb_xdp_header *hdr) } static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry, - uuid_t *uuid) + uuid_t *uuid, u64 *remote_route) { struct tb_xdp_uuid_response res; struct tb_xdp_uuid req; @@ -242,11 +278,13 @@ static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry, if (ret) return ret; - ret = tb_xdp_handle_error(&res.hdr); + ret = tb_xdp_handle_error(&res.err); if (ret) return ret; uuid_copy(uuid, &res.src_uuid); + *remote_route = (u64)res.src_route_hi << 32 | res.src_route_lo; + return 0; } @@ -314,7 +352,7 @@ static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route, if (ret) goto err; - ret = tb_xdp_handle_error(&res->hdr); + ret = tb_xdp_handle_error(&res->err); if (ret) goto err; @@ -374,8 +412,7 @@ err: } static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl, - u64 route, u8 sequence, const uuid_t *src_uuid, - const struct tb_xdp_properties *req) + struct tb_xdomain *xd, u8 sequence, const struct tb_xdp_properties *req) { struct tb_xdp_properties_response *res; size_t total_size; @@ -387,39 +424,39 @@ static int tb_xdp_properties_response(struct tb *tb, struct tb_ctl *ctl, * protocol supports forwarding, though which we might add * support later on. 
 	 */
-	if (!uuid_equal(src_uuid, &req->dst_uuid)) {
-		tb_xdp_error_response(ctl, route, sequence,
+	if (!uuid_equal(xd->local_uuid, &req->dst_uuid)) {
+		tb_xdp_error_response(ctl, xd->route, sequence,
 				      ERROR_UNKNOWN_DOMAIN);
 		return 0;
 	}
 
-	mutex_lock(&xdomain_lock);
+	mutex_lock(&xd->lock);
 
-	if (req->offset >= xdomain_property_block_len) {
-		mutex_unlock(&xdomain_lock);
+	if (req->offset >= xd->local_property_block_len) {
+		mutex_unlock(&xd->lock);
 		return -EINVAL;
 	}
 
-	len = xdomain_property_block_len - req->offset;
+	len = xd->local_property_block_len - req->offset;
 	len = min_t(u16, len, TB_XDP_PROPERTIES_MAX_DATA_LENGTH);
 	total_size = sizeof(*res) + len * 4;
 
 	res = kzalloc(total_size, GFP_KERNEL);
 	if (!res) {
-		mutex_unlock(&xdomain_lock);
+		mutex_unlock(&xd->lock);
 		return -ENOMEM;
 	}
 
-	tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
+	tb_xdp_fill_header(&res->hdr, xd->route, sequence, PROPERTIES_RESPONSE,
 			   total_size);
-	res->generation = xdomain_property_block_gen;
-	res->data_length = xdomain_property_block_len;
+	res->generation = xd->local_property_block_gen;
+	res->data_length = xd->local_property_block_len;
 	res->offset = req->offset;
-	uuid_copy(&res->src_uuid, src_uuid);
+	uuid_copy(&res->src_uuid, xd->local_uuid);
 	uuid_copy(&res->dst_uuid, &req->src_uuid);
-	memcpy(res->data, &xdomain_property_block[req->offset], len * 4);
+	memcpy(res->data, &xd->local_property_block[req->offset], len * 4);
 
-	mutex_unlock(&xdomain_lock);
+	mutex_unlock(&xd->lock);
 
 	ret = __tb_xdomain_response(ctl, res, total_size,
 				    TB_CFG_PKG_XDOMAIN_RESP);
@@ -448,7 +485,7 @@ static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
 	if (ret)
 		return ret;
 
-	return tb_xdp_handle_error(&res.hdr);
+	return tb_xdp_handle_error(&res.err);
 }
 
 static int
@@ -463,6 +500,112 @@ tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
 					 TB_CFG_PKG_XDOMAIN_RESP);
 }
 
+static int tb_xdp_link_state_status_request(struct tb_ctl *ctl, u64 route,
+					    u8 sequence, u8 *slw, u8 *tlw,
+					    u8 *sls, u8 *tls)
+{
+	struct tb_xdp_link_state_status_response res;
+	struct tb_xdp_link_state_status req;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_STATUS_REQUEST,
+			   sizeof(req));
+
+	memset(&res, 0, sizeof(res));
+	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+				   XDOMAIN_DEFAULT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	ret = tb_xdp_handle_error(&res.err);
+	if (ret)
+		return ret;
+
+	if (res.status != 0)
+		return -EREMOTEIO;
+
+	*slw = res.slw;
+	*tlw = res.tlw;
+	*sls = res.sls;
+	*tls = res.tls;
+
+	return 0;
+}
+
+static int tb_xdp_link_state_status_response(struct tb *tb, struct tb_ctl *ctl,
+					     struct tb_xdomain *xd, u8 sequence)
+{
+	struct tb_switch *sw = tb_to_switch(xd->dev.parent);
+	struct tb_xdp_link_state_status_response res;
+	struct tb_port *port = tb_port_at(xd->route, sw);
+	u32 val[2];
+	int ret;
+
+	memset(&res, 0, sizeof(res));
+	tb_xdp_fill_header(&res.hdr, xd->route, sequence,
+			   LINK_STATE_STATUS_RESPONSE, sizeof(res));
+
+	ret = tb_port_read(port, val, TB_CFG_PORT,
+			   port->cap_phy + LANE_ADP_CS_0, ARRAY_SIZE(val));
+	if (ret)
+		return ret;
+
+	res.slw = (val[0] & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >>
+			LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT;
+	res.sls = (val[0] & LANE_ADP_CS_0_SUPPORTED_SPEED_MASK) >>
+			LANE_ADP_CS_0_SUPPORTED_SPEED_SHIFT;
+	res.tls = val[1] & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+	res.tlw = (val[1] & LANE_ADP_CS_1_TARGET_WIDTH_MASK) >>
+			LANE_ADP_CS_1_TARGET_WIDTH_SHIFT;
+
+	return __tb_xdomain_response(ctl, &res, sizeof(res),
+				     TB_CFG_PKG_XDOMAIN_RESP);
+}
+
+static int tb_xdp_link_state_change_request(struct tb_ctl *ctl, u64 route,
+					    u8 sequence, u8 tlw, u8 tls)
+{
+	struct tb_xdp_link_state_change_response res;
+	struct tb_xdp_link_state_change req;
+	int ret;
+
+	memset(&req, 0, sizeof(req));
+	tb_xdp_fill_header(&req.hdr, route, sequence, LINK_STATE_CHANGE_REQUEST,
+			   sizeof(req));
+	req.tlw = tlw;
+	req.tls = tls;
+
+	memset(&res, 0, sizeof(res));
+	ret = __tb_xdomain_request(ctl, &req, sizeof(req), TB_CFG_PKG_XDOMAIN_REQ,
+				   &res, sizeof(res), TB_CFG_PKG_XDOMAIN_RESP,
+				   XDOMAIN_DEFAULT_TIMEOUT);
+	if (ret)
+		return ret;
+
+	ret = tb_xdp_handle_error(&res.err);
+	if (ret)
+		return ret;
+
+	return res.status != 0 ? -EREMOTEIO : 0;
+}
+
+static int tb_xdp_link_state_change_response(struct tb_ctl *ctl, u64 route,
+					     u8 sequence, u32 status)
+{
+	struct tb_xdp_link_state_change_response res;
+
+	memset(&res, 0, sizeof(res));
+	tb_xdp_fill_header(&res.hdr, route, sequence, LINK_STATE_CHANGE_RESPONSE,
+			   sizeof(res));
+
+	res.status = status;
+
+	return __tb_xdomain_response(ctl, &res, sizeof(res),
+				     TB_CFG_PKG_XDOMAIN_RESP);
+}
+
 /**
  * tb_register_protocol_handler() - Register protocol handler
  * @handler: Handler to register
@@ -501,6 +644,66 @@ void tb_unregister_protocol_handler(struct tb_protocol_handler *handler)
 }
 EXPORT_SYMBOL_GPL(tb_unregister_protocol_handler);
 
+static void update_property_block(struct tb_xdomain *xd)
+{
+	mutex_lock(&xdomain_lock);
+	mutex_lock(&xd->lock);
+	/*
+	 * If the local property block is not up-to-date, rebuild it now
+	 * based on the global property template.
+	 */
+	if (!xd->local_property_block ||
+	    xd->local_property_block_gen < xdomain_property_block_gen) {
+		struct tb_property_dir *dir;
+		int ret, block_len;
+		u32 *block;
+
+		dir = tb_property_copy_dir(xdomain_property_dir);
+		if (!dir) {
+			dev_warn(&xd->dev, "failed to copy properties\n");
+			goto out_unlock;
+		}
+
+		/* Fill in non-static properties now */
+		tb_property_add_text(dir, "deviceid", utsname()->nodename);
+		tb_property_add_immediate(dir, "maxhopid", xd->local_max_hopid);
+
+		ret = tb_property_format_dir(dir, NULL, 0);
+		if (ret < 0) {
+			dev_warn(&xd->dev, "local property block creation failed\n");
+			tb_property_free_dir(dir);
+			goto out_unlock;
+		}
+
+		block_len = ret;
+		block = kcalloc(block_len, sizeof(*block), GFP_KERNEL);
+		if (!block) {
+			tb_property_free_dir(dir);
+			goto out_unlock;
+		}
+
+		ret = tb_property_format_dir(dir, block, block_len);
+		if (ret) {
+			dev_warn(&xd->dev, "property block generation failed\n");
+			tb_property_free_dir(dir);
+			kfree(block);
+			goto out_unlock;
+		}
+
+		tb_property_free_dir(dir);
+		/* Release the previous block */
+		kfree(xd->local_property_block);
+		/* Assign new one */
+		xd->local_property_block = block;
+		xd->local_property_block_len = block_len;
+		xd->local_property_block_gen = xdomain_property_block_gen;
+	}
+
+out_unlock:
+	mutex_unlock(&xd->lock);
+	mutex_unlock(&xdomain_lock);
+}
+
 static void tb_xdp_handle_request(struct work_struct *work)
 {
 	struct xdomain_request_work *xw = container_of(work, typeof(*xw), work);
@@ -508,6 +711,7 @@ static void tb_xdp_handle_request(struct work_struct *work)
 	const struct tb_xdomain_header *xhdr = &pkg->xd_hdr;
 	struct tb *tb = xw->tb;
 	struct tb_ctl *ctl = tb->ctl;
+	struct tb_xdomain *xd;
 	const uuid_t *uuid;
 	int ret = 0;
 	u32 sequence;
@@ -529,16 +733,22 @@ static void tb_xdp_handle_request(struct work_struct *work)
 		goto out;
 	}
 
+	xd = tb_xdomain_find_by_route_locked(tb, route);
+	if (xd)
+		update_property_block(xd);
+
 	switch (pkg->type) {
 	case PROPERTIES_REQUEST:
-		ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
-			(const struct tb_xdp_properties *)pkg);
+		tb_dbg(tb, "%llx: received XDomain properties request\n", route);
+		if (xd) {
+			ret = tb_xdp_properties_response(tb, ctl, xd, sequence,
+				(const struct tb_xdp_properties *)pkg);
+		}
 		break;
 
-	case PROPERTIES_CHANGED_REQUEST: {
-		const struct tb_xdp_properties_changed *xchg =
-			(const struct tb_xdp_properties_changed *)pkg;
-		struct tb_xdomain *xd;
+	case PROPERTIES_CHANGED_REQUEST:
+		tb_dbg(tb, "%llx: received XDomain properties changed request\n",
+		       route);
 
 		ret = tb_xdp_properties_changed_response(ctl, route,
 							 sequence);
@@ -547,27 +757,58 @@ static void tb_xdp_handle_request(struct work_struct *work)
 		 * the xdomain related to this connection as well in
 		 * case there is a change in services it offers.
 		 */
-		xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
-		if (xd) {
-			queue_delayed_work(tb->wq, &xd->get_properties_work,
-					   msecs_to_jiffies(50));
-			tb_xdomain_put(xd);
-		}
-
+		if (xd && device_is_registered(&xd->dev))
+			queue_delayed_work(tb->wq, &xd->state_work,
+					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
 		break;
-	}
 
 	case UUID_REQUEST_OLD:
 	case UUID_REQUEST:
+		tb_dbg(tb, "%llx: received XDomain UUID request\n", route);
 		ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
 		break;
 
+	case LINK_STATE_STATUS_REQUEST:
+		tb_dbg(tb, "%llx: received XDomain link state status request\n",
+		       route);
+
+		if (xd) {
+			ret = tb_xdp_link_state_status_response(tb, ctl, xd,
+								sequence);
+		} else {
+			tb_xdp_error_response(ctl, route, sequence,
+					      ERROR_NOT_READY);
+		}
+		break;
+
+	case LINK_STATE_CHANGE_REQUEST:
+		tb_dbg(tb, "%llx: received XDomain link state change request\n",
+		       route);
+
+		if (xd && xd->state == XDOMAIN_STATE_BONDING_UUID_HIGH) {
+			const struct tb_xdp_link_state_change *lsc =
+				(const struct tb_xdp_link_state_change *)pkg;
+
+			ret = tb_xdp_link_state_change_response(ctl, route,
+								sequence, 0);
+			xd->target_link_width = lsc->tlw;
+			queue_delayed_work(tb->wq, &xd->state_work,
+					   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+		} else {
+			tb_xdp_error_response(ctl, route, sequence,
+					      ERROR_NOT_READY);
+		}
+		break;
+
 	default:
+		tb_dbg(tb, "%llx: unknown XDomain request %#x\n", route, pkg->type);
 		tb_xdp_error_response(ctl, route, sequence,
 				      ERROR_NOT_SUPPORTED);
 		break;
 	}
 
+	tb_xdomain_put(xd);
+
 	if (ret) {
 		tb_warn(tb, "failed to send XDomain response for %#x\n",
 			pkg->type);
@@ -617,7 +858,7 @@ EXPORT_SYMBOL_GPL(tb_register_service_driver);
 
 /**
  * tb_unregister_service_driver() - Unregister XDomain service driver
- * @xdrv: Driver to unregister
+ * @drv: Driver to unregister
  *
  * Unregisters XDomain service driver from the bus.
  */
@@ -636,7 +877,7 @@ static ssize_t key_show(struct device *dev, struct device_attribute *attr,
 	 * It should be null terminated but anything else is pretty much
 	 * allowed.
 	 */
-	return sprintf(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
+	return sysfs_emit(buf, "%*pE\n", (int)strlen(svc->key), svc->key);
 }
 static DEVICE_ATTR_RO(key);
 
@@ -653,7 +894,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
 	/* Full buffer size except new line and null termination */
 	get_modalias(svc, buf, PAGE_SIZE - 2);
 
-	return sprintf(buf, "%s\n", buf);
+	return strlen(strcat(buf, "\n"));
 }
 static DEVICE_ATTR_RO(modalias);
 
@@ -662,7 +903,7 @@ static ssize_t prtcid_show(struct device *dev, struct device_attribute *attr,
 {
 	struct tb_service *svc = container_of(dev, struct tb_service, dev);
 
-	return sprintf(buf, "%u\n", svc->prtcid);
+	return sysfs_emit(buf, "%u\n", svc->prtcid);
 }
 static DEVICE_ATTR_RO(prtcid);
 
@@ -671,7 +912,7 @@ static ssize_t prtcvers_show(struct device *dev, struct device_attribute *attr,
 {
 	struct tb_service *svc = container_of(dev, struct tb_service, dev);
 
-	return sprintf(buf, "%u\n", svc->prtcvers);
+	return sysfs_emit(buf, "%u\n", svc->prtcvers);
 }
 static DEVICE_ATTR_RO(prtcvers);
 
@@ -680,7 +921,7 @@ static ssize_t prtcrevs_show(struct device *dev, struct device_attribute *attr,
 {
 	struct tb_service *svc = container_of(dev, struct tb_service, dev);
 
-	return sprintf(buf, "%u\n", svc->prtcrevs);
+	return sysfs_emit(buf, "%u\n", svc->prtcrevs);
 }
 static DEVICE_ATTR_RO(prtcrevs);
 
@@ -689,7 +930,7 @@ static ssize_t prtcstns_show(struct device *dev, struct device_attribute *attr,
 {
 	struct tb_service *svc = container_of(dev, struct tb_service, dev);
 
-	return sprintf(buf, "0x%08x\n", svc->prtcstns);
+	return sysfs_emit(buf, "0x%08x\n", svc->prtcstns);
 }
 static DEVICE_ATTR_RO(prtcstns);
 
@@ -703,7 +944,7 @@ static struct attribute *tb_service_attrs[] = {
 	NULL,
 };
 
-static struct attribute_group tb_service_attr_group = {
+static const struct attribute_group tb_service_attr_group = {
 	.attrs = tb_service_attrs,
 };
 
@@ -726,6 +967,7 @@ static void tb_service_release(struct device *dev)
 	struct tb_service *svc = container_of(dev, struct tb_service, dev);
 	struct tb_xdomain *xd = tb_service_parent(svc);
 
+	tb_service_debugfs_remove(svc);
 	ida_simple_remove(&xd->service_ids, svc->id);
 	kfree(svc->key);
 	kfree(svc);
@@ -748,7 +990,7 @@ static int remove_missing_service(struct device *dev, void *data)
 	if (!svc)
 		return 0;
 
-	if (!tb_property_find(xd->properties, svc->key,
+	if (!tb_property_find(xd->remote_properties, svc->key,
 			      TB_PROPERTY_TYPE_DIRECTORY))
 		device_unregister(dev);
 
@@ -808,7 +1050,7 @@ static void enumerate_services(struct tb_xdomain *xd)
 	device_for_each_child_reverse(&xd->dev, xd, remove_missing_service);
 
 	/* Then re-enumerate properties creating new services as we go */
-	tb_property_for_each(xd->properties, p) {
+	tb_property_for_each(xd->remote_properties, p) {
 		if (p->type != TB_PROPERTY_TYPE_DIRECTORY)
 			continue;
 
@@ -830,6 +1072,7 @@ static void enumerate_services(struct tb_xdomain *xd)
 
 		id = ida_simple_get(&xd->service_ids, 0, 0, GFP_KERNEL);
 		if (id < 0) {
+			kfree(svc->key);
 			kfree(svc);
 			break;
 		}
@@ -839,6 +1082,8 @@ static void enumerate_services(struct tb_xdomain *xd)
 		svc->dev.parent = &xd->dev;
 		dev_set_name(&svc->dev, "%s.%d", dev_name(&xd->dev), svc->id);
 
+		tb_service_debugfs_init(svc);
+
 		if (device_register(&svc->dev)) {
 			put_device(&svc->dev);
 			break;
@@ -862,6 +1107,14 @@ static int populate_properties(struct tb_xdomain *xd,
 		return -EINVAL;
 	xd->vendor = p->value.immediate;
 
+	p = tb_property_find(dir, "maxhopid", TB_PROPERTY_TYPE_VALUE);
+	/*
+	 * USB4 inter-domain spec suggests using 15 as HopID if the
+	 * other end does not announce it in a property. This is for
+	 * TBT3 compatibility.
+	 */
+	xd->remote_max_hopid = p ? p->value.immediate : XDOMAIN_DEFAULT_MAX_HOPID;
+
 	kfree(xd->device_name);
 	xd->device_name = NULL;
 	kfree(xd->vendor_name);
@@ -878,41 +1131,69 @@ static int populate_properties(struct tb_xdomain *xd,
 	return 0;
 }
 
-/* Called with @xd->lock held */
-static void tb_xdomain_restore_paths(struct tb_xdomain *xd)
+static int tb_xdomain_update_link_attributes(struct tb_xdomain *xd)
 {
-	if (!xd->resume)
-		return;
+	bool change = false;
+	struct tb_port *port;
+	int ret;
 
-	xd->resume = false;
-	if (xd->transmit_path) {
-		dev_dbg(&xd->dev, "re-establishing DMA path\n");
-		tb_domain_approve_xdomain_paths(xd->tb, xd);
-	}
+	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+
+	ret = tb_port_get_link_speed(port);
+	if (ret < 0)
+		return ret;
+
+	if (xd->link_speed != ret)
+		change = true;
+
+	xd->link_speed = ret;
+
+	ret = tb_port_get_link_width(port);
+	if (ret < 0)
+		return ret;
+
+	if (xd->link_width != ret)
+		change = true;
+
+	xd->link_width = ret;
+
+	if (change)
+		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
+
+	return 0;
 }
 
-static void tb_xdomain_get_uuid(struct work_struct *work)
+static int tb_xdomain_get_uuid(struct tb_xdomain *xd)
 {
-	struct tb_xdomain *xd = container_of(work, typeof(*xd),
-					     get_uuid_work.work);
 	struct tb *tb = xd->tb;
 	uuid_t uuid;
+	u64 route;
 	int ret;
 
-	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
+	dev_dbg(&xd->dev, "requesting remote UUID\n");
+
+	ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->state_retries, &uuid,
+				  &route);
 	if (ret < 0) {
-		if (xd->uuid_retries-- > 0) {
-			queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
-					   msecs_to_jiffies(100));
+		if (xd->state_retries-- > 0) {
+			dev_dbg(&xd->dev, "failed to request UUID, retrying\n");
+			return -EAGAIN;
 		} else {
 			dev_dbg(&xd->dev, "failed to read remote UUID\n");
 		}
-		return;
+		return ret;
 	}
 
+	dev_dbg(&xd->dev, "got remote UUID %pUb\n", &uuid);
+
 	if (uuid_equal(&uuid, xd->local_uuid)) {
-		dev_dbg(&xd->dev, "intra-domain loop detected\n");
-		return;
+		if (route == xd->route)
+			dev_dbg(&xd->dev, "loop back detected\n");
+		else
+			dev_dbg(&xd->dev, "intra-domain loop detected\n");
+
+		/* Don't bond lanes automatically for loops */
+		xd->bonding_possible = false;
 	}
 
 	/*
@@ -923,27 +1204,152 @@
 	if (xd->remote_uuid && !uuid_equal(&uuid, xd->remote_uuid)) {
 		dev_dbg(&xd->dev, "remote UUID is different, unplugging\n");
 		xd->is_unplugged = true;
-		return;
+		return -ENODEV;
 	}
 
 	/* First time fill in the missing UUID */
 	if (!xd->remote_uuid) {
 		xd->remote_uuid = kmemdup(&uuid, sizeof(uuid_t), GFP_KERNEL);
 		if (!xd->remote_uuid)
-			return;
+			return -ENOMEM;
 	}
 
-	/* Now we can start the normal properties exchange */
-	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
-			   msecs_to_jiffies(100));
-	queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
-			   msecs_to_jiffies(1000));
+	return 0;
 }
 
-static void tb_xdomain_get_properties(struct work_struct *work)
+static int tb_xdomain_get_link_status(struct tb_xdomain *xd)
+{
+	struct tb *tb = xd->tb;
+	u8 slw, tlw, sls, tls;
+	int ret;
+
+	dev_dbg(&xd->dev, "sending link state status request to %pUb\n",
+		xd->remote_uuid);
+
+	ret = tb_xdp_link_state_status_request(tb->ctl, xd->route,
+					       xd->state_retries, &slw, &tlw, &sls,
+					       &tls);
+	if (ret) {
+		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+			dev_dbg(&xd->dev,
+				"failed to request remote link status, retrying\n");
+			return -EAGAIN;
+		}
+		dev_dbg(&xd->dev, "failed to receive remote link status\n");
+		return ret;
+	}
+
+	dev_dbg(&xd->dev, "remote link supports width %#x speed %#x\n", slw, sls);
+
+	if (slw < LANE_ADP_CS_0_SUPPORTED_WIDTH_DUAL) {
+		dev_dbg(&xd->dev, "remote adapter is single lane only\n");
+		return -EOPNOTSUPP;
+	}
+
+	return 0;
+}
+
+static int tb_xdomain_link_state_change(struct tb_xdomain *xd,
+					unsigned int width)
+{
+	struct tb_switch *sw = tb_to_switch(xd->dev.parent);
+	struct tb_port *port = tb_port_at(xd->route, sw);
+	struct tb *tb = xd->tb;
+	u8 tlw, tls;
+	u32 val;
+	int ret;
+
+	if (width == 2)
+		tlw = LANE_ADP_CS_1_TARGET_WIDTH_DUAL;
+	else if (width == 1)
+		tlw = LANE_ADP_CS_1_TARGET_WIDTH_SINGLE;
+	else
+		return -EINVAL;
+
+	/* Use the current target speed */
+	ret = tb_port_read(port, &val, TB_CFG_PORT, port->cap_phy + LANE_ADP_CS_1, 1);
+	if (ret)
+		return ret;
+	tls = val & LANE_ADP_CS_1_TARGET_SPEED_MASK;
+
+	dev_dbg(&xd->dev, "sending link state change request with width %#x speed %#x\n",
+		tlw, tls);
+
+	ret = tb_xdp_link_state_change_request(tb->ctl, xd->route,
+					       xd->state_retries, tlw, tls);
+	if (ret) {
+		if (ret != -EOPNOTSUPP && xd->state_retries-- > 0) {
+			dev_dbg(&xd->dev,
+				"failed to change remote link state, retrying\n");
+			return -EAGAIN;
+		}
+		dev_err(&xd->dev, "failed to request link state change, aborting\n");
+		return ret;
+	}
+
+	dev_dbg(&xd->dev, "received link state change response\n");
+	return 0;
+}
+
+static int tb_xdomain_bond_lanes_uuid_high(struct tb_xdomain *xd)
+{
+	struct tb_port *port;
+	int ret, width;
+
+	if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_SINGLE) {
+		width = 1;
+	} else if (xd->target_link_width == LANE_ADP_CS_1_TARGET_WIDTH_DUAL) {
+		width = 2;
+	} else {
+		if (xd->state_retries-- > 0) {
+			dev_dbg(&xd->dev,
+				"link state change request not received yet, retrying\n");
+			return -EAGAIN;
+		}
+		dev_dbg(&xd->dev, "timeout waiting for link change request\n");
+		return -ETIMEDOUT;
+	}
+
+	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+
+	/*
+	 * We can't use tb_xdomain_lane_bonding_enable() here because it
+	 * is the other side that initiates lane bonding. So here we
+	 * just set the width to both lane adapters and wait for the
+	 * link to transition bonded.
+	 */
+	ret = tb_port_set_link_width(port->dual_link_port, width);
+	if (ret) {
+		tb_port_warn(port->dual_link_port,
+			     "failed to set link width to %d\n", width);
+		return ret;
+	}
+
+	ret = tb_port_set_link_width(port, width);
+	if (ret) {
+		tb_port_warn(port, "failed to set link width to %d\n", width);
+		return ret;
+	}
+
+	ret = tb_port_wait_for_link_width(port, width, XDOMAIN_BONDING_TIMEOUT);
+	if (ret) {
+		dev_warn(&xd->dev, "error waiting for link width to become %d\n",
+			 width);
+		return ret;
+	}
+
+	port->bonded = width == 2;
+	port->dual_link_port->bonded = width == 2;
+
+	tb_port_update_credits(port);
+	tb_xdomain_update_link_attributes(xd);
+
+	dev_dbg(&xd->dev, "lane bonding %sabled\n", width == 2 ? "en" : "dis");
+	return 0;
+}
"en" : "dis"); + return 0; +} + +static int tb_xdomain_get_properties(struct tb_xdomain *xd) { - struct tb_xdomain *xd = container_of(work, typeof(*xd), - get_properties_work.work); struct tb_property_dir *dir; struct tb *tb = xd->tb; bool update = false; @@ -951,41 +1357,38 @@ static void tb_xdomain_get_properties(struct work_struct *work) u32 gen = 0; int ret; + dev_dbg(&xd->dev, "requesting remote properties\n"); + ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid, - xd->remote_uuid, xd->properties_retries, + xd->remote_uuid, xd->state_retries, &block, &gen); if (ret < 0) { - if (xd->properties_retries-- > 0) { - queue_delayed_work(xd->tb->wq, &xd->get_properties_work, - msecs_to_jiffies(1000)); + if (xd->state_retries-- > 0) { + dev_dbg(&xd->dev, + "failed to request remote properties, retrying\n"); + return -EAGAIN; } else { /* Give up now */ dev_err(&xd->dev, "failed read XDomain properties from %pUb\n", xd->remote_uuid); } - return; - } - xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES; + return ret; + } mutex_lock(&xd->lock); /* Only accept newer generation properties */ - if (xd->properties && gen <= xd->property_block_gen) { - /* - * On resume it is likely that the properties block is - * not changed (unless the other end added or removed - * services). However, we need to make sure the existing - * DMA paths are restored properly. - */ - tb_xdomain_restore_paths(xd); + if (xd->remote_properties && gen <= xd->remote_property_block_gen) { + ret = 0; goto err_free_block; } dir = tb_property_parse_dir(block, ret); if (!dir) { dev_err(&xd->dev, "failed to parse XDomain properties\n"); + ret = -ENOMEM; goto err_free_block; } @@ -996,15 +1399,15 @@ static void tb_xdomain_get_properties(struct work_struct *work) } /* Release the existing one */ - if (xd->properties) { - tb_property_free_dir(xd->properties); + if (xd->remote_properties) { + tb_property_free_dir(xd->remote_properties); update = true; } - xd->properties = dir; - xd->property_block_gen = gen; + xd->remote_properties = dir; + xd->remote_property_block_gen = gen; - tb_xdomain_restore_paths(xd); + tb_xdomain_update_link_attributes(xd); mutex_unlock(&xd->lock); @@ -1016,22 +1419,216 @@ static void tb_xdomain_get_properties(struct work_struct *work) * registered, we notify the userspace that it has changed. 
 	 */
 	if (!update) {
+		struct tb_port *port;
+
+		/* Now disable lane 1 if bonding was not enabled */
+		port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+		if (!port->bonded)
+			tb_port_disable(port->dual_link_port);
+
 		if (device_add(&xd->dev)) {
 			dev_err(&xd->dev, "failed to add XDomain device\n");
-			return;
+			return -ENODEV;
 		}
+		dev_info(&xd->dev, "new host found, vendor=%#x device=%#x\n",
+			 xd->vendor, xd->device);
+		if (xd->vendor_name && xd->device_name)
+			dev_info(&xd->dev, "%s %s\n", xd->vendor_name,
+				 xd->device_name);
+
+		tb_xdomain_debugfs_init(xd);
 	} else {
 		kobject_uevent(&xd->dev.kobj, KOBJ_CHANGE);
 	}
 
 	enumerate_services(xd);
-	return;
+	return 0;
 
 err_free_dir:
 	tb_property_free_dir(dir);
 err_free_block:
 	kfree(block);
 	mutex_unlock(&xd->lock);
+
+	return ret;
+}
+
+static void tb_xdomain_queue_uuid(struct tb_xdomain *xd)
+{
+	xd->state = XDOMAIN_STATE_UUID;
+	xd->state_retries = XDOMAIN_RETRIES;
+	queue_delayed_work(xd->tb->wq, &xd->state_work,
+			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status(struct tb_xdomain *xd)
+{
+	xd->state = XDOMAIN_STATE_LINK_STATUS;
+	xd->state_retries = XDOMAIN_RETRIES;
+	queue_delayed_work(xd->tb->wq, &xd->state_work,
+			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_link_status2(struct tb_xdomain *xd)
+{
+	xd->state = XDOMAIN_STATE_LINK_STATUS2;
+	xd->state_retries = XDOMAIN_RETRIES;
+	queue_delayed_work(xd->tb->wq, &xd->state_work,
+			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding(struct tb_xdomain *xd)
+{
+	if (memcmp(xd->local_uuid, xd->remote_uuid, UUID_SIZE) > 0) {
+		dev_dbg(&xd->dev, "we have higher UUID, other side bonds the lanes\n");
+		xd->state = XDOMAIN_STATE_BONDING_UUID_HIGH;
+	} else {
+		dev_dbg(&xd->dev, "we have lower UUID, bonding lanes\n");
+		xd->state = XDOMAIN_STATE_LINK_STATE_CHANGE;
+	}
+
+	xd->state_retries = XDOMAIN_RETRIES;
+	queue_delayed_work(xd->tb->wq, &xd->state_work,
+			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_bonding_uuid_low(struct tb_xdomain *xd)
+{
+	xd->state = XDOMAIN_STATE_BONDING_UUID_LOW;
+	xd->state_retries = XDOMAIN_RETRIES;
+	queue_delayed_work(xd->tb->wq, &xd->state_work,
+			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties(struct tb_xdomain *xd)
+{
+	xd->state = XDOMAIN_STATE_PROPERTIES;
+	xd->state_retries = XDOMAIN_RETRIES;
+	queue_delayed_work(xd->tb->wq, &xd->state_work,
+			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+}
+
+static void tb_xdomain_queue_properties_changed(struct tb_xdomain *xd)
+{
+	xd->properties_changed_retries = XDOMAIN_RETRIES;
+	queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
+			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
+}
+
+static void tb_xdomain_state_work(struct work_struct *work)
+{
+	struct tb_xdomain *xd = container_of(work, typeof(*xd), state_work.work);
+	int ret, state = xd->state;
+
+	if (WARN_ON_ONCE(state < XDOMAIN_STATE_INIT ||
+			 state > XDOMAIN_STATE_ERROR))
+		return;
+
+	dev_dbg(&xd->dev, "running state %s\n", state_names[state]);
+
+	switch (state) {
+	case XDOMAIN_STATE_INIT:
+		if (xd->needs_uuid) {
+			tb_xdomain_queue_uuid(xd);
+		} else {
+			tb_xdomain_queue_properties_changed(xd);
+			tb_xdomain_queue_properties(xd);
+		}
+		break;
+
+	case XDOMAIN_STATE_UUID:
+		ret = tb_xdomain_get_uuid(xd);
+		if (ret) {
+			if (ret == -EAGAIN)
+				goto retry_state;
+			xd->state = XDOMAIN_STATE_ERROR;
+		} else {
+			tb_xdomain_queue_properties_changed(xd);
+			if (xd->bonding_possible)
+				tb_xdomain_queue_link_status(xd);
+			else
+				tb_xdomain_queue_properties(xd);
+		}
+		break;
+
+	case XDOMAIN_STATE_LINK_STATUS:
+		ret = tb_xdomain_get_link_status(xd);
+		if (ret) {
+			if (ret == -EAGAIN)
+				goto retry_state;
+
+			/*
+			 * If any of the lane bonding states fail we skip
+			 * bonding completely and try to continue from
+			 * reading properties.
+			 */
+			tb_xdomain_queue_properties(xd);
+		} else {
+			tb_xdomain_queue_bonding(xd);
+		}
+		break;
+
+	case XDOMAIN_STATE_LINK_STATE_CHANGE:
+		ret = tb_xdomain_link_state_change(xd, 2);
+		if (ret) {
+			if (ret == -EAGAIN)
+				goto retry_state;
+			tb_xdomain_queue_properties(xd);
+		} else {
+			tb_xdomain_queue_link_status2(xd);
+		}
+		break;
+
+	case XDOMAIN_STATE_LINK_STATUS2:
+		ret = tb_xdomain_get_link_status(xd);
+		if (ret) {
+			if (ret == -EAGAIN)
+				goto retry_state;
+			tb_xdomain_queue_properties(xd);
+		} else {
+			tb_xdomain_queue_bonding_uuid_low(xd);
+		}
+		break;
+
+	case XDOMAIN_STATE_BONDING_UUID_LOW:
+		tb_xdomain_lane_bonding_enable(xd);
+		tb_xdomain_queue_properties(xd);
+		break;
+
+	case XDOMAIN_STATE_BONDING_UUID_HIGH:
+		if (tb_xdomain_bond_lanes_uuid_high(xd) == -EAGAIN)
+			goto retry_state;
+		tb_xdomain_queue_properties(xd);
+		break;
+
+	case XDOMAIN_STATE_PROPERTIES:
+		ret = tb_xdomain_get_properties(xd);
+		if (ret) {
+			if (ret == -EAGAIN)
+				goto retry_state;
+			xd->state = XDOMAIN_STATE_ERROR;
+		} else {
+			xd->state = XDOMAIN_STATE_ENUMERATED;
+		}
+		break;
+
+	case XDOMAIN_STATE_ENUMERATED:
+		tb_xdomain_queue_properties(xd);
+		break;
+
+	case XDOMAIN_STATE_ERROR:
+		break;
+
+	default:
+		dev_warn(&xd->dev, "unexpected state %d\n", state);
+		break;
+	}
+
+	return;
+
+retry_state:
+	queue_delayed_work(xd->tb->wq, &xd->state_work,
+			   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
 }
 
 static void tb_xdomain_properties_changed(struct work_struct *work)
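To make the state machine above easier to follow, here is the happy-path flow it implements, reconstructed from the queue helpers and tb_xdomain_state_work(); this summary is not part of the patch itself:

/*
 * Host with the lower UUID (it initiates bonding by sending the link
 * state change request and then enabling bonding on its own port):
 *   INIT -> UUID -> LINK_STATUS -> LINK_STATE_CHANGE -> LINK_STATUS2
 *        -> BONDING_UUID_LOW -> PROPERTIES -> ENUMERATED
 *
 * Host with the higher UUID (it waits for the peer's link state change
 * request and then widens its own lane adapters):
 *   INIT -> UUID -> LINK_STATUS -> BONDING_UUID_HIGH -> PROPERTIES
 *        -> ENUMERATED
 *
 * Any failure other than -EAGAIN in the bonding states falls back to
 * PROPERTIES, so property exchange still happens over a single lane.
 */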
@@ -1040,17 +1637,23 @@
 					     properties_changed_work.work);
 	int ret;
 
+	dev_dbg(&xd->dev, "sending properties changed notification\n");
+
 	ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
 				xd->properties_changed_retries, xd->local_uuid);
 	if (ret) {
-		if (xd->properties_changed_retries-- > 0)
+		if (xd->properties_changed_retries-- > 0) {
+			dev_dbg(&xd->dev,
+				"failed to send properties changed notification, retrying\n");
 			queue_delayed_work(xd->tb->wq,
 					   &xd->properties_changed_work,
-					   msecs_to_jiffies(1000));
+					   msecs_to_jiffies(XDOMAIN_DEFAULT_TIMEOUT));
+			return;
+		}
+		dev_err(&xd->dev, "failed to send properties changed notification\n");
 		return;
 	}
 
-	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
+	xd->properties_changed_retries = XDOMAIN_RETRIES;
 }
 
 static ssize_t device_show(struct device *dev, struct device_attribute *attr,
@@ -1058,7 +1661,7 @@ static ssize_t device_show(struct device *dev, struct device_attribute *attr,
 {
 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
 
-	return sprintf(buf, "%#x\n", xd->device);
+	return sysfs_emit(buf, "%#x\n", xd->device);
 }
 static DEVICE_ATTR_RO(device);
 
@@ -1070,19 +1673,28 @@ device_name_show(struct device *dev, struct device_attribute *attr, char *buf)
 
 	if (mutex_lock_interruptible(&xd->lock))
 		return -ERESTARTSYS;
 
-	ret = sprintf(buf, "%s\n", xd->device_name ? xd->device_name : "");
+	ret = sysfs_emit(buf, "%s\n", xd->device_name ?: "");
 	mutex_unlock(&xd->lock);
 
 	return ret;
 }
 static DEVICE_ATTR_RO(device_name);
 
+static ssize_t maxhopid_show(struct device *dev, struct device_attribute *attr,
+			     char *buf)
+{
+	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+	return sysfs_emit(buf, "%d\n", xd->remote_max_hopid);
+}
+static DEVICE_ATTR_RO(maxhopid);
+
 static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
 			   char *buf)
 {
 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
 
-	return sprintf(buf, "%#x\n", xd->vendor);
+	return sysfs_emit(buf, "%#x\n", xd->vendor);
 }
 static DEVICE_ATTR_RO(vendor);
 
@@ -1094,7 +1706,7 @@ vendor_name_show(struct device *dev, struct device_attribute *attr, char *buf)
 	if (mutex_lock_interruptible(&xd->lock))
 		return -ERESTARTSYS;
 
-	ret = sprintf(buf, "%s\n", xd->vendor_name ? xd->vendor_name : "");
+	ret = sysfs_emit(buf, "%s\n", xd->vendor_name ?: "");
 	mutex_unlock(&xd->lock);
 
 	return ret;
@@ -1106,20 +1718,47 @@ static ssize_t unique_id_show(struct device *dev, struct device_attribute *attr,
 {
 	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
 
-	return sprintf(buf, "%pUb\n", xd->remote_uuid);
+	return sysfs_emit(buf, "%pUb\n", xd->remote_uuid);
 }
 static DEVICE_ATTR_RO(unique_id);
 
+static ssize_t speed_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+	return sysfs_emit(buf, "%u.0 Gb/s\n", xd->link_speed);
+}
+
+static DEVICE_ATTR(rx_speed, 0444, speed_show, NULL);
+static DEVICE_ATTR(tx_speed, 0444, speed_show, NULL);
+
+static ssize_t lanes_show(struct device *dev, struct device_attribute *attr,
+			  char *buf)
+{
+	struct tb_xdomain *xd = container_of(dev, struct tb_xdomain, dev);
+
+	return sysfs_emit(buf, "%u\n", xd->link_width);
+}
+
+static DEVICE_ATTR(rx_lanes, 0444, lanes_show, NULL);
+static DEVICE_ATTR(tx_lanes, 0444, lanes_show, NULL);
+
 static struct attribute *xdomain_attrs[] = {
 	&dev_attr_device.attr,
 	&dev_attr_device_name.attr,
+	&dev_attr_maxhopid.attr,
+	&dev_attr_rx_lanes.attr,
+	&dev_attr_rx_speed.attr,
+	&dev_attr_tx_lanes.attr,
+	&dev_attr_tx_speed.attr,
 	&dev_attr_unique_id.attr,
 	&dev_attr_vendor.attr,
 	&dev_attr_vendor_name.attr,
 	NULL,
 };
 
-static struct attribute_group xdomain_attr_group = {
+static const struct attribute_group xdomain_attr_group = {
 	.attrs = xdomain_attrs,
 };
 
@@ -1134,7 +1773,10 @@ static void tb_xdomain_release(struct device *dev)
 
 	put_device(xd->dev.parent);
 
-	tb_property_free_dir(xd->properties);
+	kfree(xd->local_property_block);
+	tb_property_free_dir(xd->remote_properties);
+	ida_destroy(&xd->out_hopids);
+	ida_destroy(&xd->in_hopids);
 	ida_destroy(&xd->service_ids);
 
 	kfree(xd->local_uuid);
@@ -1146,31 +1788,17 @@ static void tb_xdomain_release(struct device *dev)
 
 static void start_handshake(struct tb_xdomain *xd)
 {
-	xd->uuid_retries = XDOMAIN_UUID_RETRIES;
-	xd->properties_retries = XDOMAIN_PROPERTIES_RETRIES;
-	xd->properties_changed_retries = XDOMAIN_PROPERTIES_CHANGED_RETRIES;
-
-	if (xd->needs_uuid) {
-		queue_delayed_work(xd->tb->wq, &xd->get_uuid_work,
-				   msecs_to_jiffies(100));
-	} else {
-		/* Start exchanging properties with the other host */
-		queue_delayed_work(xd->tb->wq, &xd->properties_changed_work,
-				   msecs_to_jiffies(100));
-		queue_delayed_work(xd->tb->wq, &xd->get_properties_work,
-				   msecs_to_jiffies(1000));
-	}
+	xd->state = XDOMAIN_STATE_INIT;
+	queue_delayed_work(xd->tb->wq, &xd->state_work,
+			   msecs_to_jiffies(XDOMAIN_SHORT_TIMEOUT));
 }
 
 static void stop_handshake(struct tb_xdomain *xd)
 {
-	xd->uuid_retries = 0;
-	xd->properties_retries = 0;
-	xd->properties_changed_retries = 0;
-
-	cancel_delayed_work_sync(&xd->get_uuid_work);
-	cancel_delayed_work_sync(&xd->get_properties_work);
 	cancel_delayed_work_sync(&xd->properties_changed_work);
+	cancel_delayed_work_sync(&xd->state_work);
+	xd->properties_changed_retries = 0;
+	xd->state_retries = 0;
 }
 
 static int __maybe_unused tb_xdomain_suspend(struct device *dev)
@@ -1181,15 +1809,7 @@ static int __maybe_unused tb_xdomain_suspend(struct device *dev)
 
 static int __maybe_unused tb_xdomain_resume(struct device *dev)
 {
-	struct tb_xdomain *xd = tb_to_xdomain(dev);
-
-	/*
-	 * Ask tb_xdomain_get_properties() restore any existing DMA
-	 * paths after properties are re-read.
-	 */
-	xd->resume = true;
-	start_handshake(xd);
-
+	start_handshake(tb_to_xdomain(dev));
 	return 0;
 }
 
@@ -1234,10 +1854,12 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
 
 	xd->tb = tb;
 	xd->route = route;
+	xd->local_max_hopid = down->config.max_in_hop_id;
 	ida_init(&xd->service_ids);
+	ida_init(&xd->in_hopids);
+	ida_init(&xd->out_hopids);
 	mutex_init(&xd->lock);
-	INIT_DELAYED_WORK(&xd->get_uuid_work, tb_xdomain_get_uuid);
-	INIT_DELAYED_WORK(&xd->get_properties_work, tb_xdomain_get_properties);
+	INIT_DELAYED_WORK(&xd->state_work, tb_xdomain_state_work);
 	INIT_DELAYED_WORK(&xd->properties_changed_work,
 			  tb_xdomain_properties_changed);
 
@@ -1252,6 +1874,7 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
 			goto err_free_local_uuid;
 	} else {
 		xd->needs_uuid = true;
+		xd->bonding_possible = !!down->dual_link_port;
 	}
 
 	device_initialize(&xd->dev);
@@ -1261,6 +1884,10 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent,
 	xd->dev.groups = xdomain_attr_groups;
 	dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
 
+	dev_dbg(&xd->dev, "local UUID %pUb\n", local_uuid);
+	if (remote_uuid)
+		dev_dbg(&xd->dev, "remote UUID %pUb\n", remote_uuid);
+
 	/*
 	 * This keeps the DMA powered on as long as we have active
 	 * connection to another host.
@@ -1310,6 +1937,8 @@ static int unregister_service(struct device *dev, void *data)
  */
 void tb_xdomain_remove(struct tb_xdomain *xd)
 {
+	tb_xdomain_debugfs_remove(xd);
+
 	stop_handshake(xd);
 
 	device_for_each_child_reverse(&xd->dev, xd, unregister_service);
@@ -1323,80 +1952,201 @@ void tb_xdomain_remove(struct tb_xdomain *xd)
 	pm_runtime_put_noidle(&xd->dev);
 	pm_runtime_set_suspended(&xd->dev);
 
-	if (!device_is_registered(&xd->dev))
+	if (!device_is_registered(&xd->dev)) {
 		put_device(&xd->dev);
-	else
+	} else {
+		dev_info(&xd->dev, "host disconnected\n");
 		device_unregister(&xd->dev);
+	}
 }
 
 /**
- * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
+ * tb_xdomain_lane_bonding_enable() - Enable lane bonding on XDomain
  * @xd: XDomain connection
- * @transmit_path: HopID of the transmit path the other end is using to
- *		   send packets
- * @transmit_ring: DMA ring used to receive packets from the other end
- * @receive_path: HopID of the receive path the other end is using to
- *		  receive packets
- * @receive_ring: DMA ring used to send packets to the other end
  *
- * The function enables DMA paths accordingly so that after successful
- * return the caller can send and receive packets using high-speed DMA
- * path.
+ * Lane bonding is disabled by default for XDomains. This function tries
+ * to enable bonding by first enabling the port and waiting for the CL0
+ * state.
  *
- * Return: %0 in case of success and negative errno in case of error
+ * Return: %0 in case of success and negative errno in case of error.
  */
-int tb_xdomain_enable_paths(struct tb_xdomain *xd, u16 transmit_path,
-			    u16 transmit_ring, u16 receive_path,
-			    u16 receive_ring)
+int tb_xdomain_lane_bonding_enable(struct tb_xdomain *xd)
 {
+	struct tb_port *port;
 	int ret;
 
-	mutex_lock(&xd->lock);
+	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+	if (!port->dual_link_port)
+		return -ENODEV;
+
+	ret = tb_port_enable(port->dual_link_port);
+	if (ret)
+		return ret;
+
+	ret = tb_wait_for_port(port->dual_link_port, true);
+	if (ret < 0)
+		return ret;
+	if (!ret)
+		return -ENOTCONN;
 
-	if (xd->transmit_path) {
-		ret = xd->transmit_path == transmit_path ? 0 : -EBUSY;
-		goto exit_unlock;
+	ret = tb_port_lane_bonding_enable(port);
+	if (ret) {
+		tb_port_warn(port, "failed to enable lane bonding\n");
+		return ret;
 	}
 
-	xd->transmit_path = transmit_path;
-	xd->transmit_ring = transmit_ring;
-	xd->receive_path = receive_path;
-	xd->receive_ring = receive_ring;
+	ret = tb_port_wait_for_link_width(port, 2, XDOMAIN_BONDING_TIMEOUT);
+	if (ret) {
+		tb_port_warn(port, "failed to enable lane bonding\n");
+		return ret;
+	}
 
-	ret = tb_domain_approve_xdomain_paths(xd->tb, xd);
+	tb_port_update_credits(port);
+	tb_xdomain_update_link_attributes(xd);
 
-exit_unlock:
-	mutex_unlock(&xd->lock);
+	dev_dbg(&xd->dev, "lane bonding enabled\n");
+	return 0;
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_enable);
 
-	return ret;
+/**
+ * tb_xdomain_lane_bonding_disable() - Disable lane bonding
+ * @xd: XDomain connection
+ *
+ * Lane bonding is disabled by default for XDomains. If bonding has been
+ * enabled, this function can be used to disable it.
+ */
+void tb_xdomain_lane_bonding_disable(struct tb_xdomain *xd)
+{
+	struct tb_port *port;
+
+	port = tb_port_at(xd->route, tb_xdomain_parent(xd));
+	if (port->dual_link_port) {
+		tb_port_lane_bonding_disable(port);
+		if (tb_port_wait_for_link_width(port, 1, 100) == -ETIMEDOUT)
+			tb_port_warn(port, "timeout disabling lane bonding\n");
+		tb_port_disable(port->dual_link_port);
+		tb_port_update_credits(port);
+		tb_xdomain_update_link_attributes(xd);
+
+		dev_dbg(&xd->dev, "lane bonding disabled\n");
+	}
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_lane_bonding_disable);
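With the two helpers above exported, a service driver can opt in to the wider link before setting up its DMA paths. A minimal sketch of that usage, with the fallback the kernel-doc implies (illustrative only; the example_* names are hypothetical and the declarations come from <linux/thunderbolt.h>):

#include <linux/thunderbolt.h>

static int example_bring_up_link(struct tb_xdomain *xd)
{
	int ret;

	/* Try to bond the lanes; single lane still works if this fails */
	ret = tb_xdomain_lane_bonding_enable(xd);
	if (ret)
		dev_info(&xd->dev, "continuing on a single lane (%d)\n", ret);

	return 0;
}

static void example_tear_down_link(struct tb_xdomain *xd)
{
	/* Undo only what this driver requested */
	tb_xdomain_lane_bonding_disable(xd);
}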
 
+/**
+ * tb_xdomain_alloc_in_hopid() - Allocate input HopID for tunneling
+ * @xd: XDomain connection
+ * @hopid: Preferred HopID or %-1 for next available
+ *
+ * Returns allocated HopID or negative errno. Specifically returns
+ * %-ENOSPC if there are no more available HopIDs. Returned HopID is
+ * guaranteed to be within range supported by the input lane adapter.
+ * Call tb_xdomain_release_in_hopid() to release the allocated HopID.
+ */
+int tb_xdomain_alloc_in_hopid(struct tb_xdomain *xd, int hopid)
+{
+	if (hopid < 0)
+		hopid = TB_PATH_MIN_HOPID;
+	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->local_max_hopid)
+		return -EINVAL;
+
+	return ida_alloc_range(&xd->in_hopids, hopid, xd->local_max_hopid,
+			       GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_alloc_in_hopid);
+
+/**
+ * tb_xdomain_alloc_out_hopid() - Allocate output HopID for tunneling
+ * @xd: XDomain connection
+ * @hopid: Preferred HopID or %-1 for next available
+ *
+ * Returns allocated HopID or negative errno. Specifically returns
+ * %-ENOSPC if there are no more available HopIDs. Returned HopID is
+ * guaranteed to be within range supported by the output lane adapter.
+ * Call tb_xdomain_release_out_hopid() to release the allocated HopID.
+ */
+int tb_xdomain_alloc_out_hopid(struct tb_xdomain *xd, int hopid)
+{
+	if (hopid < 0)
+		hopid = TB_PATH_MIN_HOPID;
+	if (hopid < TB_PATH_MIN_HOPID || hopid > xd->remote_max_hopid)
+		return -EINVAL;
+
+	return ida_alloc_range(&xd->out_hopids, hopid, xd->remote_max_hopid,
+			       GFP_KERNEL);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_alloc_out_hopid);
+
+/**
+ * tb_xdomain_release_in_hopid() - Release input HopID
+ * @xd: XDomain connection
+ * @hopid: HopID to release
+ */
+void tb_xdomain_release_in_hopid(struct tb_xdomain *xd, int hopid)
+{
+	ida_free(&xd->in_hopids, hopid);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_release_in_hopid);
+
+/**
+ * tb_xdomain_release_out_hopid() - Release output HopID
+ * @xd: XDomain connection
+ * @hopid: HopID to release
+ */
+void tb_xdomain_release_out_hopid(struct tb_xdomain *xd, int hopid)
+{
+	ida_free(&xd->out_hopids, hopid);
+}
+EXPORT_SYMBOL_GPL(tb_xdomain_release_out_hopid);
+
+/**
+ * tb_xdomain_enable_paths() - Enable DMA paths for XDomain connection
+ * @xd: XDomain connection
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
+ *
+ * The function enables DMA paths accordingly so that after successful
+ * return the caller can send and receive packets using high-speed DMA
+ * path. If a transmit or receive path is not needed, pass %-1 for those
+ * parameters.
+ *
+ * Return: %0 in case of success and negative errno in case of error
+ */
+int tb_xdomain_enable_paths(struct tb_xdomain *xd, int transmit_path,
+			    int transmit_ring, int receive_path,
+			    int receive_ring)
+{
+	return tb_domain_approve_xdomain_paths(xd->tb, xd, transmit_path,
					       transmit_ring, receive_path,
					       receive_ring);
 }
 EXPORT_SYMBOL_GPL(tb_xdomain_enable_paths);
 
 /**
  * tb_xdomain_disable_paths() - Disable DMA paths for XDomain connection
  * @xd: XDomain connection
+ * @transmit_path: HopID we are using to send out packets
+ * @transmit_ring: DMA ring used to send out packets
+ * @receive_path: HopID the other end is using to send packets to us
+ * @receive_ring: DMA ring used to receive packets from @receive_path
  *
  * This does the opposite of tb_xdomain_enable_paths(). After call to
- * this the caller is not expected to use the rings anymore.
+ * this the caller is not expected to use the rings anymore. Passing %-1
+ * as path/ring parameter means don't care. Normally the callers should
+ * pass the same values here as they do when paths are enabled.
  *
+ * Return: %0 in case of success and negative errno in case of error
  */
-int tb_xdomain_disable_paths(struct tb_xdomain *xd)
+int tb_xdomain_disable_paths(struct tb_xdomain *xd, int transmit_path,
+			     int transmit_ring, int receive_path,
+			     int receive_ring)
 {
-	int ret = 0;
-
-	mutex_lock(&xd->lock);
-	if (xd->transmit_path) {
-		xd->transmit_path = 0;
-		xd->transmit_ring = 0;
-		xd->receive_path = 0;
-		xd->receive_ring = 0;
-
-		ret = tb_domain_disconnect_xdomain_paths(xd->tb, xd);
-	}
-	mutex_unlock(&xd->lock);
-
-	return ret;
+	return tb_domain_disconnect_xdomain_paths(xd->tb, xd, transmit_path,
						  transmit_ring, receive_path,
						  receive_ring);
 }
 EXPORT_SYMBOL_GPL(tb_xdomain_disable_paths);
 
@@ -1569,35 +2319,6 @@ bool tb_xdomain_handle_request(struct tb *tb, enum tb_cfg_pkg_type type,
 	return ret > 0;
 }
 
-static int rebuild_property_block(void)
-{
-	u32 *block, len;
-	int ret;
-
-	ret = tb_property_format_dir(xdomain_property_dir, NULL, 0);
-	if (ret < 0)
-		return ret;
-
-	len = ret;
-
-	block = kcalloc(len, sizeof(u32), GFP_KERNEL);
-	if (!block)
-		return -ENOMEM;
-
-	ret = tb_property_format_dir(xdomain_property_dir, block, len);
-	if (ret) {
-		kfree(block);
-		return ret;
-	}
-
-	kfree(xdomain_property_block);
-	xdomain_property_block = block;
-	xdomain_property_block_len = len;
-	xdomain_property_block_gen++;
-
-	return 0;
-}
-
 static int update_xdomain(struct device *dev, void *data)
 {
 	struct tb_xdomain *xd;
@@ -1662,11 +2383,7 @@ int tb_register_property_dir(const char *key, struct tb_property_dir *dir)
 	if (ret)
 		goto err_unlock;
 
-	ret = rebuild_property_block();
-	if (ret) {
-		remove_directory(key, dir);
-		goto err_unlock;
-	}
+	xdomain_property_block_gen++;
 
 	mutex_unlock(&xdomain_lock);
 	update_all_xdomains();
@@ -1692,7 +2409,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir)
 
 	mutex_lock(&xdomain_lock);
 	if (remove_directory(key, dir))
-		ret = rebuild_property_block();
+		xdomain_property_block_gen++;
 	mutex_unlock(&xdomain_lock);
 
 	if (!ret)
@@ -1702,8 +2419,6 @@ EXPORT_SYMBOL_GPL(tb_unregister_property_dir);
 
 int tb_xdomain_init(void)
 {
-	int ret;
-
 	xdomain_property_dir = tb_property_create_dir(NULL);
 	if (!xdomain_property_dir)
 		return -ENOMEM;
@@ -1712,26 +2427,21 @@ int tb_xdomain_init(void)
 	 * Initialize standard set of properties without any service
 	 * directories. Those will be added by service drivers
 	 * themselves when they are loaded.
+	 *
+	 * Rest of the properties are filled dynamically based on these
+	 * when the P2P connection is made.
 	 */
 	tb_property_add_immediate(xdomain_property_dir, "vendorid",
 				  PCI_VENDOR_ID_INTEL);
 	tb_property_add_text(xdomain_property_dir, "vendorid", "Intel Corp.");
 	tb_property_add_immediate(xdomain_property_dir, "deviceid", 0x1);
-	tb_property_add_text(xdomain_property_dir, "deviceid",
-			     utsname()->nodename);
 	tb_property_add_immediate(xdomain_property_dir, "devicerv", 0x80000100);
 
-	ret = rebuild_property_block();
-	if (ret) {
-		tb_property_free_dir(xdomain_property_dir);
-		xdomain_property_dir = NULL;
-	}
-
-	return ret;
+	xdomain_property_block_gen = get_random_u32();
+	return 0;
 }
 
 void tb_xdomain_exit(void)
 {
-	kfree(xdomain_property_block);
 	tb_property_free_dir(xdomain_property_dir);
 }
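Taken together, the HopID allocators and the reworked tb_xdomain_enable_paths()/tb_xdomain_disable_paths() give a service driver a complete setup and teardown sequence. A hedged sketch of how a caller might use them; the example_connect name and the ring numbers are hypothetical placeholders (a real driver passes the hop of the NHI rings it allocated), not part of this patch:

#include <linux/thunderbolt.h>

static int example_connect(struct tb_xdomain *xd, int *in_hopid, int *out_hopid)
{
	int ret;

	/* Let the core pick the next available HopIDs on both adapters */
	*in_hopid = tb_xdomain_alloc_in_hopid(xd, -1);
	if (*in_hopid < 0)
		return *in_hopid;

	*out_hopid = tb_xdomain_alloc_out_hopid(xd, -1);
	if (*out_hopid < 0) {
		ret = *out_hopid;
		goto err_release_in;
	}

	/* Ring numbers "1" are illustrative only */
	ret = tb_xdomain_enable_paths(xd, *out_hopid, 1, *in_hopid, 1);
	if (ret)
		goto err_release_out;

	return 0;

err_release_out:
	tb_xdomain_release_out_hopid(xd, *out_hopid);
err_release_in:
	tb_xdomain_release_in_hopid(xd, *in_hopid);
	return ret;
}

On teardown the driver would pass the same values to tb_xdomain_disable_paths() and then release both HopIDs, mirroring the sequence above in reverse.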