author | 2020-02-20 16:23:37 +0800
---|---
committer | 2020-02-20 16:23:37 +0800
commit | c95baf12f5077419db01313ab61c2aac007d40cd (patch)
tree | 8c2aed3b89aecfb100b0546b601b7c7ae513a974 /drivers/gpu/drm/drm_dp_mst_topology.c
parent | drm/i915/gvt: remove unused vblank_done completion (diff)
parent | drm/i915/dp: Add all tiled and port sync conns to modeset (diff)
Merge drm-intel-next-queued into gvt-next
Backmerge to pull in
https://patchwork.freedesktop.org/patch/353621/?series=73544&rev=1
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
-rw-r--r-- | drivers/gpu/drm/drm_dp_mst_topology.c | 1695
1 file changed, 1382 insertions(+), 313 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 6b14b63b8d62..38bf111e5f9b 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -28,6 +28,13 @@ #include <linux/sched.h> #include <linux/seq_file.h> +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) +#include <linux/stacktrace.h> +#include <linux/sort.h> +#include <linux/timekeeping.h> +#include <linux/math64.h> +#endif + #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_mst_helper.h> @@ -45,6 +52,12 @@ * protocol. The helpers contain a topology manager and bandwidth manager. * The helpers encapsulate the sending and received of sideband msgs. */ +struct drm_dp_pending_up_req { + struct drm_dp_sideband_msg_hdr hdr; + struct drm_dp_sideband_msg_req_body msg; + struct list_head next; +}; + static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, char *buf); @@ -61,8 +74,13 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int offset, int size, u8 *bytes); -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_branch *mstb); +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); + +static void +drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb); + static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port); @@ -380,7 +398,7 @@ drm_dp_encode_sideband_req(const struct drm_dp_sideband_msg_req_body *req, memcpy(&buf[idx], req->u.i2c_read.transactions[i].bytes, req->u.i2c_read.transactions[i].num_bytes); idx += req->u.i2c_read.transactions[i].num_bytes; - buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 5; + buf[idx] = (req->u.i2c_read.transactions[i].no_stop_bit & 0x1) << 4; buf[idx] |= (req->u.i2c_read.transactions[i].i2c_transaction_delay & 0xf); idx++; } @@ -504,8 +522,10 @@ drm_dp_decode_sideband_req(const struct drm_dp_sideband_msg_tx *raw, } if (failed) { - for (i = 0; i < r->num_transactions; i++) + for (i = 0; i < r->num_transactions; i++) { + tx = &r->transactions[i]; kfree(tx->bytes); + } return -ENOMEM; } @@ -833,6 +853,7 @@ static bool drm_dp_sideband_parse_enum_path_resources_ack(struct drm_dp_sideband { int idx = 1; repmsg->u.path_resources.port_number = (raw->msg[idx] >> 4) & 0xf; + repmsg->u.path_resources.fec_capable = raw->msg[idx] & 0x1; idx++; if (idx > raw->curlen) goto fail_len; @@ -937,6 +958,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw, case DP_POWER_DOWN_PHY: case DP_POWER_UP_PHY: return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg); + case DP_CLEAR_PAYLOAD_ID_TABLE: + return true; /* since there's nothing to parse */ default: DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type, drm_dp_mst_req_type_str(msg->req_type)); @@ -1035,6 +1058,15 @@ static int build_link_address(struct drm_dp_sideband_msg_tx *msg) return 0; } +static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg) +{ + struct drm_dp_sideband_msg_req_body req; + + req.req_type = DP_CLEAR_PAYLOAD_ID_TABLE; + drm_dp_encode_sideband_req(&req, msg); + return 0; +} + static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num) { struct drm_dp_sideband_msg_req_body req; @@ -1177,6 +1209,8 @@ static int drm_dp_mst_wait_tx_reply(struct 
drm_dp_mst_branch *mstb, txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { mstb->tx_slots[txmsg->seqno] = NULL; } + mgr->is_waiting_for_dwn_reply = false; + } out: if (unlikely(ret == -EIO) && drm_debug_enabled(DRM_UT_DP)) { @@ -1186,6 +1220,7 @@ out: } mutex_unlock(&mgr->qlock); + drm_dp_mst_kick_tx(mgr); return ret; } @@ -1393,39 +1428,194 @@ drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port) } EXPORT_SYMBOL(drm_dp_mst_put_port_malloc); -static void drm_dp_destroy_mst_branch_device(struct kref *kref) +#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) + +#define STACK_DEPTH 8 + +static noinline void +__topology_ref_save(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_ref_history *history, + enum drm_dp_mst_topology_ref_type type) { - struct drm_dp_mst_branch *mstb = - container_of(kref, struct drm_dp_mst_branch, topology_kref); - struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; - struct drm_dp_mst_port *port, *tmp; - bool wake_tx = false; + struct drm_dp_mst_topology_ref_entry *entry = NULL; + depot_stack_handle_t backtrace; + ulong stack_entries[STACK_DEPTH]; + uint n; + int i; - mutex_lock(&mgr->lock); - list_for_each_entry_safe(port, tmp, &mstb->ports, next) { - list_del(&port->next); - drm_dp_mst_topology_put_port(port); + n = stack_trace_save(stack_entries, ARRAY_SIZE(stack_entries), 1); + backtrace = stack_depot_save(stack_entries, n, GFP_KERNEL); + if (!backtrace) + return; + + /* Try to find an existing entry for this backtrace */ + for (i = 0; i < history->len; i++) { + if (history->entries[i].backtrace == backtrace) { + entry = &history->entries[i]; + break; + } } - mutex_unlock(&mgr->lock); - /* drop any tx slots msg */ - mutex_lock(&mstb->mgr->qlock); - if (mstb->tx_slots[0]) { - mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[0] = NULL; - wake_tx = true; + /* Otherwise add one */ + if (!entry) { + struct drm_dp_mst_topology_ref_entry *new; + int new_len = history->len + 1; + + new = krealloc(history->entries, sizeof(*new) * new_len, + GFP_KERNEL); + if (!new) + return; + + entry = &new[history->len]; + history->len = new_len; + history->entries = new; + + entry->backtrace = backtrace; + entry->type = type; + entry->count = 0; } - if (mstb->tx_slots[1]) { - mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; - mstb->tx_slots[1] = NULL; - wake_tx = true; + entry->count++; + entry->ts_nsec = ktime_get_ns(); +} + +static int +topology_ref_history_cmp(const void *a, const void *b) +{ + const struct drm_dp_mst_topology_ref_entry *entry_a = a, *entry_b = b; + + if (entry_a->ts_nsec > entry_b->ts_nsec) + return 1; + else if (entry_a->ts_nsec < entry_b->ts_nsec) + return -1; + else + return 0; +} + +static inline const char * +topology_ref_type_to_str(enum drm_dp_mst_topology_ref_type type) +{ + if (type == DRM_DP_MST_TOPOLOGY_REF_GET) + return "get"; + else + return "put"; +} + +static void +__dump_topology_ref_history(struct drm_dp_mst_topology_ref_history *history, + void *ptr, const char *type_str) +{ + struct drm_printer p = drm_debug_printer(DBG_PREFIX); + char *buf = kzalloc(PAGE_SIZE, GFP_KERNEL); + int i; + + if (!buf) + return; + + if (!history->len) + goto out; + + /* First, sort the list so that it goes from oldest to newest + * reference entry + */ + sort(history->entries, history->len, sizeof(*history->entries), + topology_ref_history_cmp, NULL); + + drm_printf(&p, "%s (%p) topology count reached 0, dumping history:\n", + type_str, ptr); + + for (i = 0; i < history->len; i++) { + const struct drm_dp_mst_topology_ref_entry 
*entry = + &history->entries[i]; + ulong *entries; + uint nr_entries; + u64 ts_nsec = entry->ts_nsec; + u32 rem_nsec = do_div(ts_nsec, 1000000000); + + nr_entries = stack_depot_fetch(entry->backtrace, &entries); + stack_trace_snprint(buf, PAGE_SIZE, entries, nr_entries, 4); + + drm_printf(&p, " %d %ss (last at %5llu.%06u):\n%s", + entry->count, + topology_ref_type_to_str(entry->type), + ts_nsec, rem_nsec / 1000, buf); } - mutex_unlock(&mstb->mgr->qlock); - if (wake_tx) - wake_up_all(&mstb->mgr->tx_waitq); + /* Now free the history, since this is the only time we expose it */ + kfree(history->entries); +out: + kfree(buf); +} - drm_dp_mst_put_mstb_malloc(mstb); +static __always_inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) +{ + __dump_topology_ref_history(&mstb->topology_ref_history, mstb, + "MSTB"); +} + +static __always_inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) +{ + __dump_topology_ref_history(&port->topology_ref_history, port, + "Port"); +} + +static __always_inline void +save_mstb_topology_ref(struct drm_dp_mst_branch *mstb, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(mstb->mgr, &mstb->topology_ref_history, type); +} + +static __always_inline void +save_port_topology_ref(struct drm_dp_mst_port *port, + enum drm_dp_mst_topology_ref_type type) +{ + __topology_ref_save(port->mgr, &port->topology_ref_history, type); +} + +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_lock(&mgr->topology_ref_history_lock); +} + +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) +{ + mutex_unlock(&mgr->topology_ref_history_lock); +} +#else +static inline void +topology_ref_history_lock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +topology_ref_history_unlock(struct drm_dp_mst_topology_mgr *mgr) {} +static inline void +drm_dp_mst_dump_mstb_topology_history(struct drm_dp_mst_branch *mstb) {} +static inline void +drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {} +#define save_mstb_topology_ref(mstb, type) +#define save_port_topology_ref(port, type) +#endif + +static void drm_dp_destroy_mst_branch_device(struct kref *kref) +{ + struct drm_dp_mst_branch *mstb = + container_of(kref, struct drm_dp_mst_branch, topology_kref); + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + + drm_dp_mst_dump_mstb_topology_history(mstb); + + INIT_LIST_HEAD(&mstb->destroy_next); + + /* + * This can get called under mgr->mutex, so we need to perform the + * actual destruction of the mstb in another worker + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&mstb->destroy_next, &mgr->destroy_branch_device_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1453,11 +1643,17 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) { - int ret = kref_get_unless_zero(&mstb->topology_kref); + int ret; - if (ret) - DRM_DEBUG("mstb %p (%d)\n", mstb, - kref_read(&mstb->topology_kref)); + topology_ref_history_lock(mstb->mgr); + ret = kref_get_unless_zero(&mstb->topology_kref); + if (ret) { + DRM_DEBUG("mstb %p (%d)\n", + mstb, kref_read(&mstb->topology_kref)); + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); + } + + topology_ref_history_unlock(mstb->mgr); return ret; } @@ -1478,9 +1674,14 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb) */ 
static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET); WARN_ON(kref_read(&mstb->topology_kref) == 0); kref_get(&mstb->topology_kref); DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref)); + + topology_ref_history_unlock(mstb->mgr); } /** @@ -1498,27 +1699,14 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb) static void drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb) { + topology_ref_history_lock(mstb->mgr); + DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1); - kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); -} - -static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt) -{ - struct drm_dp_mst_branch *mstb; + save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT); - switch (old_pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* remove i2c over sideband */ - drm_dp_mst_unregister_i2c_bus(&port->aux); - break; - case DP_PEER_DEVICE_MST_BRANCHING: - mstb = port->mstb; - port->mstb = NULL; - drm_dp_mst_topology_put_mstb(mstb); - break; - } + topology_ref_history_unlock(mstb->mgr); + kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device); } static void drm_dp_destroy_port(struct kref *kref) @@ -1527,31 +1715,24 @@ static void drm_dp_destroy_port(struct kref *kref) container_of(kref, struct drm_dp_mst_port, topology_kref); struct drm_dp_mst_topology_mgr *mgr = port->mgr; - if (!port->input) { - kfree(port->cached_edid); + drm_dp_mst_dump_port_topology_history(port); - /* - * The only time we don't have a connector - * on an output port is if the connector init - * fails. - */ - if (port->connector) { - /* we can't destroy the connector here, as - * we might be holding the mode_config.mutex - * from an EDID retrieval */ - - mutex_lock(&mgr->destroy_connector_lock); - list_add(&port->next, &mgr->destroy_connector_list); - mutex_unlock(&mgr->destroy_connector_lock); - schedule_work(&mgr->destroy_connector_work); - return; - } - /* no need to clean up vcpi - * as if we have no connector we never setup a vcpi */ - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + /* There's nothing that needs locking to destroy an input port yet */ + if (port->input) { + drm_dp_mst_put_port_malloc(port); + return; } - drm_dp_mst_put_port_malloc(port); + + kfree(port->cached_edid); + + /* + * we can't destroy the connector here, as we might be holding the + * mode_config.mutex from an EDID retrieval + */ + mutex_lock(&mgr->delayed_destroy_lock); + list_add(&port->next, &mgr->destroy_port_list); + mutex_unlock(&mgr->delayed_destroy_lock); + schedule_work(&mgr->delayed_destroy_work); } /** @@ -1579,12 +1760,17 @@ static void drm_dp_destroy_port(struct kref *kref) static int __must_check drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) { - int ret = kref_get_unless_zero(&port->topology_kref); + int ret; - if (ret) - DRM_DEBUG("port %p (%d)\n", port, - kref_read(&port->topology_kref)); + topology_ref_history_lock(port->mgr); + ret = kref_get_unless_zero(&port->topology_kref); + if (ret) { + DRM_DEBUG("port %p (%d)\n", + port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + } + topology_ref_history_unlock(port->mgr); return ret; } @@ -1603,9 +1789,14 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port) */ static void 
drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + WARN_ON(kref_read(&port->topology_kref) == 0); kref_get(&port->topology_kref); DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref)); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET); + + topology_ref_history_unlock(port->mgr); } /** @@ -1621,8 +1812,13 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port) */ static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port) { + topology_ref_history_lock(port->mgr); + DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref) - 1); + save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT); + + topology_ref_history_unlock(port->mgr); kref_put(&port->topology_kref, drm_dp_destroy_port); } @@ -1739,38 +1935,79 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -/* - * return sends link address for new mstb - */ -static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port) +static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt) { - int ret; - u8 rad[6], lct; - bool send_link = false; + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + struct drm_dp_mst_branch *mstb; + u8 rad[8], lct; + int ret = 0; + + if (port->pdt == new_pdt) + return 0; + + /* Teardown the old pdt, if there is one */ + switch (port->pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + /* + * If the new PDT would also have an i2c bus, don't bother + * with reregistering it + */ + if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + new_pdt == DP_PEER_DEVICE_SST_SINK) { + port->pdt = new_pdt; + return 0; + } + + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + break; + case DP_PEER_DEVICE_MST_BRANCHING: + mutex_lock(&mgr->lock); + drm_dp_mst_topology_put_mstb(port->mstb); + port->mstb = NULL; + mutex_unlock(&mgr->lock); + break; + } + + port->pdt = new_pdt; switch (port->pdt) { case DP_PEER_DEVICE_DP_LEGACY_CONV: case DP_PEER_DEVICE_SST_SINK: /* add i2c over sideband */ ret = drm_dp_mst_register_i2c_bus(&port->aux); break; + case DP_PEER_DEVICE_MST_BRANCHING: lct = drm_dp_calculate_rad(port, rad); + mstb = drm_dp_add_mst_branch_device(lct, rad); + if (!mstb) { + ret = -ENOMEM; + DRM_ERROR("Failed to create MSTB for port %p", port); + goto out; + } - port->mstb = drm_dp_add_mst_branch_device(lct, rad); - if (port->mstb) { - port->mstb->mgr = port->mgr; - port->mstb->port_parent = port; - /* - * Make sure this port's memory allocation stays - * around until its child MSTB releases it - */ - drm_dp_mst_get_port_malloc(port); + mutex_lock(&mgr->lock); + port->mstb = mstb; + mstb->mgr = port->mgr; + mstb->port_parent = port; - send_link = true; - } + /* + * Make sure this port's memory allocation stays + * around until its child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); + mutex_unlock(&mgr->lock); + + /* And make sure we send a link address for this */ + ret = 1; break; } - return send_link; + +out: + if (ret < 0) + port->pdt = DP_PEER_DEVICE_NONE; + return ret; } /** @@ -1903,44 +2140,134 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister); static void +drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, + struct drm_dp_mst_port *port) +{ + struct drm_dp_mst_topology_mgr *mgr = port->mgr; + char proppath[255]; + int ret; + + build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath)); + 
port->connector = mgr->cbs->add_connector(mgr, port, proppath); + if (!port->connector) { + ret = -ENOMEM; + goto error; + } + + if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || + port->pdt == DP_PEER_DEVICE_SST_SINK) && + port->port_num >= DP_MST_LOGICAL_PORT_0) { + port->cached_edid = drm_get_edid(port->connector, + &port->aux.ddc); + drm_connector_set_tile_property(port->connector); + } + + mgr->cbs->register_connector(port->connector); + return; + +error: + DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret); +} + +/* + * Drop a topology reference, and unlink the port from the in-memory topology + * layout + */ +static void +drm_dp_mst_topology_unlink_port(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) +{ + mutex_lock(&mgr->lock); + port->parent->num_ports--; + list_del(&port->next); + mutex_unlock(&mgr->lock); + drm_dp_mst_topology_put_port(port); +} + +static struct drm_dp_mst_port * +drm_dp_mst_add_port(struct drm_device *dev, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb, u8 port_number) +{ + struct drm_dp_mst_port *port = kzalloc(sizeof(*port), GFP_KERNEL); + + if (!port) + return NULL; + + kref_init(&port->topology_kref); + kref_init(&port->malloc_kref); + port->parent = mstb; + port->port_num = port_number; + port->mgr = mgr; + port->aux.name = "DPMST"; + port->aux.dev = dev->dev; + port->aux.is_remote = true; + + /* initialize the MST downstream port's AUX crc work queue */ + drm_dp_remote_aux_init(&port->aux); + + /* + * Make sure the memory allocation for our parent branch stays + * around until our own memory allocation is released + */ + drm_dp_mst_get_mstb_malloc(mstb); + + return port; +} + +static int drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, struct drm_device *dev, struct drm_dp_link_addr_reply_port *port_msg) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - bool ret; - bool created = false; - int old_pdt = 0; - int old_ddps = 0; + int old_ddps = 0, ret; + u8 new_pdt = DP_PEER_DEVICE_NONE; + bool created = false, send_link_addr = false, changed = false; port = drm_dp_get_port(mstb, port_msg->port_number); if (!port) { - port = kzalloc(sizeof(*port), GFP_KERNEL); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); if (!port) - return; - kref_init(&port->topology_kref); - kref_init(&port->malloc_kref); - port->parent = mstb; - port->port_num = port_msg->port_number; - port->mgr = mstb->mgr; - port->aux.name = "DPMST"; - port->aux.dev = dev->dev; - port->aux.is_remote = true; - - /* - * Make sure the memory allocation for our parent branch stays - * around until our own memory allocation is released + return -ENOMEM; + created = true; + changed = true; + } else if (!port->input && port_msg->input_port && port->connector) { + /* Since port->connector can't be changed here, we create a + * new port if input_port changes from 0 to 1 */ - drm_dp_mst_get_mstb_malloc(mstb); - + drm_dp_mst_topology_unlink_port(mgr, port); + drm_dp_mst_topology_put_port(port); + port = drm_dp_mst_add_port(dev, mgr, mstb, + port_msg->port_number); + if (!port) + return -ENOMEM; + changed = true; created = true; - } else { - old_pdt = port->pdt; + } else if (port->input && !port_msg->input_port) { + changed = true; + } else if (port->connector) { + /* We're updating a port that's exposed to userspace, so do it + * under lock + */ + drm_modeset_lock(&mgr->base.lock, NULL); + old_ddps = port->ddps; + changed = port->ddps != port_msg->ddps || + (port->ddps 
&& + (port->ldps != port_msg->legacy_device_plug_status || + port->dpcd_rev != port_msg->dpcd_revision || + port->mcs != port_msg->mcs || + port->pdt != port_msg->peer_device_type || + port->num_sdp_stream_sinks != + port_msg->num_sdp_stream_sinks)); } - port->pdt = port_msg->peer_device_type; port->input = port_msg->input_port; + if (!port->input) + new_pdt = port_msg->peer_device_type; port->mcs = port_msg->mcs; port->ddps = port_msg->ddps; port->ldps = port_msg->legacy_device_plug_status; @@ -1951,78 +2278,106 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, /* manage mstb port lists with mgr lock - take a reference for this list */ if (created) { - mutex_lock(&mstb->mgr->lock); + mutex_lock(&mgr->lock); drm_dp_mst_topology_get_port(port); list_add(&port->next, &mstb->ports); - mutex_unlock(&mstb->mgr->lock); + mstb->num_ports++; + mutex_unlock(&mgr->lock); } if (old_ddps != port->ddps) { if (port->ddps) { if (!port->input) { - drm_dp_send_enum_path_resources(mstb->mgr, - mstb, port); + drm_dp_send_enum_path_resources(mgr, mstb, + port); } } else { port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - - ret = drm_dp_port_setup_pdt(port); - if (ret == true) - drm_dp_send_link_address(mstb->mgr, port->mstb); + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + send_link_addr = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT on port %p: %d\n", + port, ret); + goto fail; } - if (created && !port->input) { - char proppath[255]; - - build_mst_prop_path(mstb, port->port_num, proppath, - sizeof(proppath)); - port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, - port, - proppath); - if (!port->connector) { - /* remove it from the port list */ - mutex_lock(&mstb->mgr->lock); - list_del(&port->next); - mutex_unlock(&mstb->mgr->lock); - /* drop port list reference */ - drm_dp_mst_topology_put_port(port); - goto out; - } - if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - port->pdt == DP_PEER_DEVICE_SST_SINK) && - port->port_num >= DP_MST_LOGICAL_PORT_0) { - port->cached_edid = drm_get_edid(port->connector, - &port->aux.ddc); - drm_connector_set_tile_property(port->connector); - } - (*mstb->mgr->cbs->register_connector)(port->connector); + /* + * If this port wasn't just created, then we're reprobing because + * we're coming out of suspend. 
In this case, always resend the link + * address if there's an MSTB on this port + */ + if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING) + send_link_addr = true; + + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (!port->input) + drm_dp_mst_port_add_connector(mstb, port); + + if (send_link_addr && port->mstb) { + ret = drm_dp_send_link_address(mgr, port->mstb); + if (ret == 1) /* MSTB below us changed */ + changed = true; + else if (ret < 0) + goto fail_put; } -out: /* put reference to this port */ drm_dp_mst_topology_put_port(port); + return changed; + +fail: + drm_dp_mst_topology_unlink_port(mgr, port); + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); +fail_put: + drm_dp_mst_topology_put_port(port); + return ret; } static void drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, struct drm_dp_connection_status_notify *conn_stat) { + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; struct drm_dp_mst_port *port; - int old_pdt; - int old_ddps; - bool dowork = false; + int old_ddps, old_input, ret, i; + u8 new_pdt; + bool dowork = false, create_connector = false; + port = drm_dp_get_port(mstb, conn_stat->port_number); if (!port) return; + if (port->connector) { + if (!port->input && conn_stat->input_port) { + /* + * We can't remove a connector from an already exposed + * port, so just throw the port out and make sure we + * reprobe the link address of it's parent MSTB + */ + drm_dp_mst_topology_unlink_port(mgr, port); + mstb->link_address_sent = false; + dowork = true; + goto out; + } + + /* Locking is only needed if the port's exposed to userspace */ + drm_modeset_lock(&mgr->base.lock, NULL); + } else if (port->input && !conn_stat->input_port) { + create_connector = true; + /* Reprobe link address so we get num_sdp_streams */ + mstb->link_address_sent = false; + dowork = true; + } + old_ddps = port->ddps; - old_pdt = port->pdt; - port->pdt = conn_stat->peer_device_type; + old_input = port->input; + port->input = conn_stat->input_port; port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; port->ddps = conn_stat->displayport_device_plug_status; @@ -2034,17 +2389,49 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, port->available_pbn = 0; } } - if (old_pdt != port->pdt && !port->input) { - drm_dp_port_teardown_pdt(port, old_pdt); - if (drm_dp_port_setup_pdt(port)) - dowork = true; + new_pdt = port->input ? 
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; + + ret = drm_dp_port_set_pdt(port, new_pdt); + if (ret == 1) { + dowork = true; + } else if (ret < 0) { + DRM_ERROR("Failed to change PDT for port %p: %d\n", + port, ret); + dowork = false; } + if (!old_input && old_ddps != port->ddps && !port->ddps) { + for (i = 0; i < mgr->max_payloads; i++) { + struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; + struct drm_dp_mst_port *port_validated; + + if (!vcpi) + continue; + + port_validated = + container_of(vcpi, struct drm_dp_mst_port, vcpi); + port_validated = + drm_dp_mst_topology_get_port_validated(mgr, port_validated); + if (!port_validated) { + mutex_lock(&mgr->payload_lock); + vcpi->num_slots = 0; + mutex_unlock(&mgr->payload_lock); + } else { + drm_dp_mst_topology_put_port(port_validated); + } + } + } + + if (port->connector) + drm_modeset_unlock(&mgr->base.lock); + else if (create_connector) + drm_dp_mst_port_add_connector(mstb, port); + +out: drm_dp_mst_topology_put_port(port); if (dowork) queue_work(system_long_wq, &mstb->mgr->work); - } static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, @@ -2130,42 +2517,67 @@ drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr, return mstb; } -static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_mst_port *port; - struct drm_dp_mst_branch *mstb_child; - if (!mstb->link_address_sent) - drm_dp_send_link_address(mgr, mstb); + int ret; + bool changed = false; + + if (!mstb->link_address_sent) { + ret = drm_dp_send_link_address(mgr, mstb); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; + } list_for_each_entry(port, &mstb->ports, next) { - if (port->input) - continue; + struct drm_dp_mst_branch *mstb_child = NULL; - if (!port->ddps) + if (port->input || !port->ddps) continue; - if (!port->available_pbn) + if (!port->available_pbn) { + drm_modeset_lock(&mgr->base.lock, NULL); drm_dp_send_enum_path_resources(mgr, mstb, port); + drm_modeset_unlock(&mgr->base.lock); + changed = true; + } - if (port->mstb) { + if (port->mstb) mstb_child = drm_dp_mst_topology_get_mstb_validated( mgr, port->mstb); - if (mstb_child) { - drm_dp_check_and_send_link_address(mgr, mstb_child); - drm_dp_mst_topology_put_mstb(mstb_child); - } + + if (mstb_child) { + ret = drm_dp_check_and_send_link_address(mgr, + mstb_child); + drm_dp_mst_topology_put_mstb(mstb_child); + if (ret == 1) + changed = true; + else if (ret < 0) + return ret; } } + + return changed; } static void drm_dp_mst_link_probe_work(struct work_struct *work) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, work); + struct drm_device *dev = mgr->dev; struct drm_dp_mst_branch *mstb; int ret; + bool clear_payload_id_table; + + mutex_lock(&mgr->probe_lock); mutex_lock(&mgr->lock); + clear_payload_id_table = !mgr->payload_id_table_cleared; + mgr->payload_id_table_cleared = true; + mstb = mgr->mst_primary; if (mstb) { ret = drm_dp_mst_topology_try_get_mstb(mstb); @@ -2173,10 +2585,30 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work) mstb = NULL; } mutex_unlock(&mgr->lock); - if (mstb) { - drm_dp_check_and_send_link_address(mgr, mstb); - drm_dp_mst_topology_put_mstb(mstb); + if (!mstb) { + mutex_unlock(&mgr->probe_lock); 
+ return; } + + /* + * Certain branch devices seem to incorrectly report an available_pbn + * of 0 on downstream sinks, even after clearing the + * DP_PAYLOAD_ALLOCATE_* registers in + * drm_dp_mst_topology_mgr_set_mst(). Namely, the CableMatters USB-C + * 2x DP hub. Sending a CLEAR_PAYLOAD_ID_TABLE message seems to make + * things work again. + */ + if (clear_payload_id_table) { + DRM_DEBUG_KMS("Clearing payload ID table\n"); + drm_dp_send_clear_payload_id_table(mgr, mstb); + } + + ret = drm_dp_check_and_send_link_address(mgr, mstb); + drm_dp_mst_topology_put_mstb(mstb); + + mutex_unlock(&mgr->probe_lock); + if (ret) + drm_kms_helper_hotplug_event(dev); } static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, @@ -2353,9 +2785,11 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) ret = process_single_tx_qlock(mgr, txmsg, false); if (ret == 1) { /* txmsg is sent it should be in the slots now */ + mgr->is_waiting_for_dwn_reply = true; list_del(&txmsg->next); } else if (ret) { DRM_DEBUG_KMS("failed to send msg in q %d\n", ret); + mgr->is_waiting_for_dwn_reply = false; list_del(&txmsg->next); if (txmsg->seqno != -1) txmsg->dst->tx_slots[txmsg->seqno] = NULL; @@ -2395,7 +2829,8 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, drm_dp_mst_dump_sideband_msg_tx(&p, txmsg); } - if (list_is_singular(&mgr->tx_msg_downq)) + if (list_is_singular(&mgr->tx_msg_downq) && + !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } @@ -2422,16 +2857,18 @@ drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply) } } -static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, +static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb) { struct drm_dp_sideband_msg_tx *txmsg; struct drm_dp_link_address_ack_reply *reply; - int i, len, ret; + struct drm_dp_mst_port *port, *tmp; + int i, len, ret, port_mask = 0; + bool changed = false; txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); if (!txmsg) - return; + return -ENOMEM; txmsg->dst = mstb; len = build_link_address(txmsg); @@ -2457,16 +2894,61 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, drm_dp_check_mstb_guid(mstb, reply->guid); - for (i = 0; i < reply->nports; i++) - drm_dp_mst_handle_link_address_port(mstb, mgr->dev, - &reply->ports[i]); + for (i = 0; i < reply->nports; i++) { + port_mask |= BIT(reply->ports[i].port_number); + ret = drm_dp_mst_handle_link_address_port(mstb, mgr->dev, + &reply->ports[i]); + if (ret == 1) + changed = true; + else if (ret < 0) + goto out; + } + + /* Prune any ports that are currently a part of mstb in our in-memory + * topology, but were not seen in this link address. Usually this + * means that they were removed while the topology was out of sync, + * e.g. during suspend/resume + */ + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + if (port_mask & BIT(port->port_num)) + continue; - drm_kms_helper_hotplug_event(mgr->dev); + DRM_DEBUG_KMS("port %d was not in link address, removing\n", + port->port_num); + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + changed = true; + } + mutex_unlock(&mgr->lock); out: if (ret <= 0) mstb->link_address_sent = false; kfree(txmsg); + return ret < 0 ? 
ret : changed; +} + +void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_sideband_msg_tx *txmsg; + int len, ret; + + txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); + if (!txmsg) + return; + + txmsg->dst = mstb; + len = build_clear_payload_id_table(txmsg); + + drm_dp_queue_down_tx(mgr, txmsg); + + ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); + if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) + DRM_DEBUG_KMS("clear payload table id nak received\n"); + + kfree(txmsg); } static int @@ -2504,6 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, path_res->avail_payload_bw_number); port->available_pbn = path_res->avail_payload_bw_number; + port->fec_capable = path_res->fec_capable; } } @@ -2786,9 +3269,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) drm_dp_mst_topology_put_port(port); } - for (i = 0; i < mgr->max_payloads; i++) { - if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) + for (i = 0; i < mgr->max_payloads; /* do nothing */) { + if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) { + i++; continue; + } DRM_DEBUG_KMS("removing payload %d\n", i); for (j = i; j < mgr->max_payloads - 1; j++) { @@ -2996,6 +3481,7 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count) int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) { int ret = 0; + int i = 0; struct drm_dp_mst_branch *mstb = NULL; mutex_lock(&mgr->lock); @@ -3056,10 +3542,23 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms /* this can fail if the device is gone */ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); ret = 0; + mutex_lock(&mgr->payload_lock); memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); mgr->payload_mask = 0; set_bit(0, &mgr->payload_mask); + for (i = 0; i < mgr->max_payloads; i++) { + struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; + + if (vcpi) { + vcpi->vcpi = 0; + vcpi->num_slots = 0; + } + mgr->proposed_vcpis[i] = NULL; + } mgr->vcpi_mask = 0; + mutex_unlock(&mgr->payload_lock); + + mgr->payload_id_table_cleared = false; } out_unlock: @@ -3071,6 +3570,23 @@ out_unlock: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_set_mst); +static void +drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_port *port; + + /* The link address will need to be re-sent on resume */ + mstb->link_address_sent = false; + + list_for_each_entry(port, &mstb->ports, next) { + /* The PBN for each port will also need to be re-probed */ + port->available_pbn = 0; + + if (port->mstb) + drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb); + } +} + /** * drm_dp_mst_topology_mgr_suspend() - suspend the MST manager * @mgr: manager to suspend @@ -3084,62 +3600,89 @@ void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); + flush_work(&mgr->up_req_work); flush_work(&mgr->work); - flush_work(&mgr->destroy_connector_work); + flush_work(&mgr->delayed_destroy_work); + + mutex_lock(&mgr->lock); + if (mgr->mst_state && mgr->mst_primary) + drm_dp_mst_topology_mgr_invalidate_mstb(mgr->mst_primary); + mutex_unlock(&mgr->lock); } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_suspend); /** * drm_dp_mst_topology_mgr_resume() - resume the MST manager * @mgr: manager to resume + * @sync: whether or not to perform topology reprobing 
synchronously * * This will fetch DPCD and see if the device is still there, * if it is, it will rewrite the MSTM control bits, and return. * - * if the device fails this returns -1, and the driver should do + * If the device fails this returns -1, and the driver should do * a full MST reprobe, in case we were undocked. + * + * During system resume (where it is assumed that the driver will be calling + * drm_atomic_helper_resume()) this function should be called beforehand with + * @sync set to true. In contexts like runtime resume where the driver is not + * expected to be calling drm_atomic_helper_resume(), this function should be + * called with @sync set to false in order to avoid deadlocking. + * + * Returns: -1 if the MST topology was removed while we were suspended, 0 + * otherwise. */ -int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) +int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, + bool sync) { - int ret = 0; + int ret; + u8 guid[16]; mutex_lock(&mgr->lock); + if (!mgr->mst_primary) + goto out_fail; - if (mgr->mst_primary) { - int sret; - u8 guid[16]; + ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, + DP_RECEIVER_CAP_SIZE); + if (ret != DP_RECEIVER_CAP_SIZE) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } - sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); - if (sret != DP_RECEIVER_CAP_SIZE) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); + if (ret < 0) { + DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); + goto out_fail; + } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC); - if (ret < 0) { - DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } + /* Some hubs forget their guids after they resume */ + ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); + if (ret != 16) { + DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); + goto out_fail; + } + drm_dp_check_mstb_guid(mgr->mst_primary, guid); - /* Some hubs forget their guids after they resume */ - sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); - if (sret != 16) { - DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n"); - ret = -1; - goto out_unlock; - } - drm_dp_check_mstb_guid(mgr->mst_primary, guid); + /* + * For the final step of resuming the topology, we need to bring the + * state of our in-memory topology back into sync with reality. 
So, + * restart the probing process as if we're probing a new hub + */ + queue_work(system_long_wq, &mgr->work); + mutex_unlock(&mgr->lock); - ret = 0; - } else - ret = -1; + if (sync) { + DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n"); + flush_work(&mgr->work); + } -out_unlock: + return 0; + +out_fail: mutex_unlock(&mgr->lock); - return ret; + return -1; } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); @@ -3242,6 +3785,7 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) mutex_lock(&mgr->qlock); txmsg->state = DRM_DP_SIDEBAND_TX_RX; mstb->tx_slots[slot] = NULL; + mgr->is_waiting_for_dwn_reply = false; mutex_unlock(&mgr->qlock); wake_up_all(&mgr->tx_waitq); @@ -3251,17 +3795,86 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) no_msg: drm_dp_mst_topology_put_mstb(mstb); clear_down_rep_recv: + mutex_lock(&mgr->qlock); + mgr->is_waiting_for_dwn_reply = false; + mutex_unlock(&mgr->qlock); memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; } +static inline bool +drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_pending_up_req *up_req) +{ + struct drm_dp_mst_branch *mstb = NULL; + struct drm_dp_sideband_msg_req_body *msg = &up_req->msg; + struct drm_dp_sideband_msg_hdr *hdr = &up_req->hdr; + bool hotplug = false; + + if (hdr->broadcast) { + const u8 *guid = NULL; + + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) + guid = msg->u.conn_stat.guid; + else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY) + guid = msg->u.resource_stat.guid; + + mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); + } else { + mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); + } + + if (!mstb) { + DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", + hdr->lct); + return false; + } + + /* TODO: Add missing handler for DP_RESOURCE_STATUS_NOTIFY events */ + if (msg->req_type == DP_CONNECTION_STATUS_NOTIFY) { + drm_dp_mst_handle_conn_stat(mstb, &msg->u.conn_stat); + hotplug = true; + } + + drm_dp_mst_topology_put_mstb(mstb); + return hotplug; +} + +static void drm_dp_mst_up_req_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + up_req_work); + struct drm_dp_pending_up_req *up_req; + bool send_hotplug = false; + + mutex_lock(&mgr->probe_lock); + while (true) { + mutex_lock(&mgr->up_req_lock); + up_req = list_first_entry_or_null(&mgr->up_req_list, + struct drm_dp_pending_up_req, + next); + if (up_req) + list_del(&up_req->next); + mutex_unlock(&mgr->up_req_lock); + + if (!up_req) + break; + + send_hotplug |= drm_dp_mst_process_up_req(mgr, up_req); + kfree(up_req); + } + mutex_unlock(&mgr->probe_lock); + + if (send_hotplug) + drm_kms_helper_hotplug_event(mgr->dev); +} + static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { - struct drm_dp_sideband_msg_req_body msg; struct drm_dp_sideband_msg_hdr *hdr = &mgr->up_req_recv.initial_hdr; - struct drm_dp_mst_branch *mstb = NULL; - const u8 *guid; + struct drm_dp_pending_up_req *up_req; bool seqno; if (!drm_dp_get_one_sb_msg(mgr, true)) @@ -3270,56 +3883,53 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) if (!mgr->up_req_recv.have_eomt) return 0; - if (!hdr->broadcast) { - mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); - goto out; - } + up_req = kzalloc(sizeof(*up_req), GFP_KERNEL); + if 
(!up_req) { + DRM_ERROR("Not enough memory to process MST up req\n"); + return -ENOMEM; } + INIT_LIST_HEAD(&up_req->next); seqno = hdr->seqno; - drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); + drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg); - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) - guid = msg.u.conn_stat.guid; - else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) - guid = msg.u.resource_stat.guid; - else + if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY && + up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) { + DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n", + up_req->msg.req_type); + kfree(up_req); goto out; - - drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, - false); - - if (!mstb) { - mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid); - if (!mstb) { - DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", - hdr->lct); - goto out; - } } - if (msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { - drm_dp_mst_handle_conn_stat(mstb, &msg.u.conn_stat); + drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type, + seqno, false); + + if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) { + const struct drm_dp_connection_status_notify *conn_stat = + &up_req->msg.u.conn_stat; DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", - msg.u.conn_stat.port_number, - msg.u.conn_stat.legacy_device_plug_status, - msg.u.conn_stat.displayport_device_plug_status, - msg.u.conn_stat.message_capability_status, - msg.u.conn_stat.input_port, - msg.u.conn_stat.peer_device_type); + conn_stat->port_number, + conn_stat->legacy_device_plug_status, + conn_stat->displayport_device_plug_status, + conn_stat->message_capability_status, + conn_stat->input_port, + conn_stat->peer_device_type); + } else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { + const struct drm_dp_resource_status_notify *res_stat = + &up_req->msg.u.resource_stat; - drm_kms_helper_hotplug_event(mgr->dev); - } else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) { DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", - msg.u.resource_stat.port_number, - msg.u.resource_stat.available_pbn); + res_stat->port_number, + res_stat->available_pbn); } - drm_dp_mst_topology_put_mstb(mstb); + up_req->hdr = *hdr; + mutex_lock(&mgr->up_req_lock); + list_add_tail(&up_req->next, &mgr->up_req_list); + mutex_unlock(&mgr->up_req_lock); + queue_work(system_long_wq, &mgr->up_req_work); + out: memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); return 0; @@ -3366,22 +3976,31 @@ EXPORT_SYMBOL(drm_dp_mst_hpd_irq); /** * drm_dp_mst_detect_port() - get connection status for an MST port * @connector: DRM connector for this port + * @ctx: The acquisition context to use for grabbing locks * @mgr: manager for this port - * @port: unverified pointer to a port + * @port: pointer to a port * - * This returns the current connection state for a port. It validates the - * port pointer still exists so the caller doesn't require a reference + * This returns the current connection state for a port. 
*/ -enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, - struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) +int +drm_dp_mst_detect_port(struct drm_connector *connector, + struct drm_modeset_acquire_ctx *ctx, + struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_port *port) { - enum drm_connector_status status = connector_status_disconnected; + int ret; /* we need to search for the port in the mgr in case it's gone */ port = drm_dp_mst_topology_get_port_validated(mgr, port); if (!port) return connector_status_disconnected; + ret = drm_modeset_lock(&mgr->base.lock, ctx); + if (ret) + goto out; + + ret = connector_status_disconnected; + if (!port->ddps) goto out; @@ -3391,7 +4010,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_SST_SINK: - status = connector_status_connected; + ret = connector_status_connected; /* for logical ports - cache the EDID */ if (port->port_num >= 8 && !port->cached_edid) { port->cached_edid = drm_get_edid(connector, &port->aux.ddc); @@ -3399,12 +4018,12 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector break; case DP_PEER_DEVICE_DP_LEGACY_CONV: if (port->ldps) - status = connector_status_connected; + ret = connector_status_connected; break; } out: drm_dp_mst_topology_put_port(port); - return status; + return ret; } EXPORT_SYMBOL(drm_dp_mst_detect_port); @@ -3510,6 +4129,7 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, * @mgr: MST topology manager for the port * @port: port to find vcpi slots for * @pbn: bandwidth required for the mode in PBN + * @pbn_div: divider for DSC mode that takes FEC into account * * Allocates VCPI slots to @port, replacing any previous VCPI allocations it * may have had. 
Any atomic drivers which support MST must call this function @@ -3536,11 +4156,12 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, */ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, int pbn) + struct drm_dp_mst_port *port, int pbn, + int pbn_div) { struct drm_dp_mst_topology_state *topology_state; struct drm_dp_vcpi_allocation *pos, *vcpi = NULL; - int prev_slots, req_slots, ret; + int prev_slots, prev_bw, req_slots; topology_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(topology_state)) @@ -3551,6 +4172,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, if (pos->port == port) { vcpi = pos; prev_slots = vcpi->vcpi; + prev_bw = vcpi->pbn; /* * This should never happen, unless the driver tries @@ -3566,14 +4188,22 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, break; } } - if (!vcpi) + if (!vcpi) { prev_slots = 0; + prev_bw = 0; + } + + if (pbn_div <= 0) + pbn_div = mgr->pbn_div; - req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); + req_slots = DIV_ROUND_UP(pbn, pbn_div); DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n", port->connector->base.id, port->connector->name, port, prev_slots, req_slots); + DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", + port->connector->base.id, port->connector->name, + port, prev_bw, pbn); /* Add the new allocation to the state */ if (!vcpi) { @@ -3586,9 +4216,9 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, list_add(&vcpi->next, &topology_state->vcpis); } vcpi->vcpi = req_slots; + vcpi->pbn = pbn; - ret = req_slots; - return ret; + return req_slots; } EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); @@ -3837,10 +4467,11 @@ EXPORT_SYMBOL(drm_dp_check_act_status); * drm_dp_calc_pbn_mode() - Calculate the PBN for a mode. * @clock: dot clock for the mode * @bpp: bpp for the mode. + * @dsc: DSC mode. If true, bpp has units of 1/16 of a bit per pixel * * This uses the formula in the spec to calculate the PBN value for a mode. */ -int drm_dp_calc_pbn_mode(int clock, int bpp) +int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc) { /* * margin 5300ppm + 300ppm ~ 0.6% as per spec, factor is 1.006 @@ -3851,7 +4482,16 @@ int drm_dp_calc_pbn_mode(int clock, int bpp) * peak_kbps *= (1006/1000) * peak_kbps *= (64/54) * peak_kbps *= 8 convert to bytes + * + * If the bpp is in units of 1/16, further divide by 16. 
Put this + * factor in the numerator rather than the denominator to avoid + * integer overflow */ + + if (dsc) + return DIV_ROUND_UP_ULL(mul_u32_u32(clock * (bpp / 16), 64 * 1006), + 8 * 54 * 1000 * 1000); + return DIV_ROUND_UP_ULL(mul_u32_u32(clock * bpp, 64 * 1006), 8 * 54 * 1000 * 1000); } @@ -3990,39 +4630,108 @@ static void drm_dp_tx_work(struct work_struct *work) struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); mutex_lock(&mgr->qlock); - if (!list_empty(&mgr->tx_msg_downq)) + if (!list_empty(&mgr->tx_msg_downq) && !mgr->is_waiting_for_dwn_reply) process_single_down_tx_qlock(mgr); mutex_unlock(&mgr->qlock); } -static void drm_dp_destroy_connector_work(struct work_struct *work) +static inline void +drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) { - struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work); - struct drm_dp_mst_port *port; - bool send_hotplug = false; + if (port->connector) + port->mgr->cbs->destroy_connector(port->mgr, port->connector); + + drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE); + drm_dp_mst_put_port_malloc(port); +} + +static inline void +drm_dp_delayed_destroy_mstb(struct drm_dp_mst_branch *mstb) +{ + struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; + struct drm_dp_mst_port *port, *tmp; + bool wake_tx = false; + + mutex_lock(&mgr->lock); + list_for_each_entry_safe(port, tmp, &mstb->ports, next) { + list_del(&port->next); + drm_dp_mst_topology_put_port(port); + } + mutex_unlock(&mgr->lock); + + /* drop any tx slots msg */ + mutex_lock(&mstb->mgr->qlock); + if (mstb->tx_slots[0]) { + mstb->tx_slots[0]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[0] = NULL; + wake_tx = true; + } + if (mstb->tx_slots[1]) { + mstb->tx_slots[1]->state = DRM_DP_SIDEBAND_TX_TIMEOUT; + mstb->tx_slots[1] = NULL; + wake_tx = true; + } + mutex_unlock(&mstb->mgr->qlock); + + if (wake_tx) + wake_up_all(&mstb->mgr->tx_waitq); + + drm_dp_mst_put_mstb_malloc(mstb); +} + +static void drm_dp_delayed_destroy_work(struct work_struct *work) +{ + struct drm_dp_mst_topology_mgr *mgr = + container_of(work, struct drm_dp_mst_topology_mgr, + delayed_destroy_work); + bool send_hotplug = false, go_again; + /* * Not a regular list traverse as we have to drop the destroy - * connector lock before destroying the connector, to avoid AB->BA + * connector lock before destroying the mstb/port, to avoid AB->BA * ordering between this lock and the config mutex. 
*/ - for (;;) { - mutex_lock(&mgr->destroy_connector_lock); - port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); - if (!port) { - mutex_unlock(&mgr->destroy_connector_lock); - break; + do { + go_again = false; + + for (;;) { + struct drm_dp_mst_branch *mstb; + + mutex_lock(&mgr->delayed_destroy_lock); + mstb = list_first_entry_or_null(&mgr->destroy_branch_device_list, + struct drm_dp_mst_branch, + destroy_next); + if (mstb) + list_del(&mstb->destroy_next); + mutex_unlock(&mgr->delayed_destroy_lock); + + if (!mstb) + break; + + drm_dp_delayed_destroy_mstb(mstb); + go_again = true; } - list_del(&port->next); - mutex_unlock(&mgr->destroy_connector_lock); - mgr->cbs->destroy_connector(mgr, port->connector); + for (;;) { + struct drm_dp_mst_port *port; - drm_dp_port_teardown_pdt(port, port->pdt); - port->pdt = DP_PEER_DEVICE_NONE; + mutex_lock(&mgr->delayed_destroy_lock); + port = list_first_entry_or_null(&mgr->destroy_port_list, + struct drm_dp_mst_port, + next); + if (port) + list_del(&port->next); + mutex_unlock(&mgr->delayed_destroy_lock); + + if (!port) + break; + + drm_dp_delayed_destroy_port(port); + send_hotplug = true; + go_again = true; + } + } while (go_again); - drm_dp_mst_put_port_malloc(port); - send_hotplug = true; - } if (send_hotplug) drm_kms_helper_hotplug_event(mgr->dev); } @@ -4084,9 +4793,61 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, kfree(mst_state); } +static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port, + struct drm_dp_mst_branch *branch) +{ + while (port->parent) { + if (port->parent == branch) + return true; + + if (port->parent->port_parent) + port = port->parent->port_parent; + else + break; + } + return false; +} + +static inline +int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch, + struct drm_dp_mst_topology_state *mst_state) +{ + struct drm_dp_mst_port *port; + struct drm_dp_vcpi_allocation *vcpi; + int pbn_limit = 0, pbn_used = 0; + + list_for_each_entry(port, &branch->ports, next) { + if (port->mstb) + if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state)) + return -ENOSPC; + + if (port->available_pbn > 0) + pbn_limit = port->available_pbn; + } + DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n", + branch, pbn_limit); + + list_for_each_entry(vcpi, &mst_state->vcpis, next) { + if (!vcpi->pbn) + continue; + + if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch)) + pbn_used += vcpi->pbn; + } + DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n", + branch, pbn_used); + + if (pbn_used > pbn_limit) { + DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n", + branch); + return -ENOSPC; + } + return 0; +} + static inline int -drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_topology_state *mst_state) +drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state) { struct drm_dp_vcpi_allocation *vcpi; int avail_slots = 63, payload_count = 0; @@ -4124,6 +4885,128 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr, } /** + * drm_dp_mst_add_affected_dsc_crtcs + * @state: Pointer to the new struct drm_dp_mst_topology_state + * @mgr: MST topology manager + * + * Whenever there is a change in mst topology + * DSC configuration would have to be recalculated + * therefore we need to trigger modeset on all affected + * CRTCs in that topology + * + * See also: + * drm_dp_mst_atomic_enable_dsc() + 
@@ -4124,6 +4885,128 @@ drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
 }
 
 /**
+ * drm_dp_mst_add_affected_dsc_crtcs - flag all DSC-affected CRTCs for a modeset
+ * @state: Pointer to the new drm_atomic_state
+ * @mgr: MST topology manager
+ *
+ * Whenever there is a change in the MST topology, the DSC configuration
+ * has to be recalculated, so we need to trigger a modeset on all the
+ * affected CRTCs in that topology.
+ *
+ * See also:
+ * drm_dp_mst_atomic_enable_dsc()
+ */
+int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
+				      struct drm_dp_mst_topology_mgr *mgr)
+{
+	struct drm_dp_mst_topology_state *mst_state;
+	struct drm_dp_vcpi_allocation *pos;
+	struct drm_connector *connector;
+	struct drm_connector_state *conn_state;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *crtc_state;
+
+	mst_state = drm_atomic_get_mst_topology_state(state, mgr);
+
+	if (IS_ERR(mst_state))
+		return -EINVAL;
+
+	list_for_each_entry(pos, &mst_state->vcpis, next) {
+
+		connector = pos->port->connector;
+
+		if (!connector)
+			return -EINVAL;
+
+		conn_state = drm_atomic_get_connector_state(state, connector);
+
+		if (IS_ERR(conn_state))
+			return PTR_ERR(conn_state);
+
+		crtc = conn_state->crtc;
+
+		if (WARN_ON(!crtc))
+			return -EINVAL;
+
+		if (!drm_dp_mst_dsc_aux_for_port(pos->port))
+			continue;
+
+		crtc_state = drm_atomic_get_crtc_state(mst_state->base.state, crtc);
+
+		if (IS_ERR(crtc_state))
+			return PTR_ERR(crtc_state);
+
+		DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
+				 mgr, crtc);
+
+		crtc_state->mode_changed = true;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs);
+
+/**
+ * drm_dp_mst_atomic_enable_dsc - Set DSC Enable Flag to On/Off
+ * @state: Pointer to the new drm_atomic_state
+ * @port: Pointer to the affected MST Port
+ * @pbn: Newly recalculated bandwidth required for the link with DSC enabled
+ * @pbn_div: Divider to calculate the correct number of PBN per slot
+ * @enable: Boolean flag to enable or disable DSC on the port
+ *
+ * This function enables or disables DSC on the given port by recalculating
+ * its VCPI from the PBN provided, and sets the dsc_enabled flag to keep
+ * track of which ports have DSC enabled.
+ */
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+				 struct drm_dp_mst_port *port,
+				 int pbn, int pbn_div,
+				 bool enable)
+{
+	struct drm_dp_mst_topology_state *mst_state;
+	struct drm_dp_vcpi_allocation *pos;
+	bool found = false;
+	int vcpi = 0;
+
+	mst_state = drm_atomic_get_mst_topology_state(state, port->mgr);
+
+	if (IS_ERR(mst_state))
+		return PTR_ERR(mst_state);
+
+	list_for_each_entry(pos, &mst_state->vcpis, next) {
+		if (pos->port == port) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
+				 port, mst_state);
+		return -EINVAL;
+	}
+
+	if (pos->dsc_enabled == enable) {
+		DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
+				 port, enable, pos->vcpi);
+		vcpi = pos->vcpi;
+	}
+
+	if (enable) {
+		vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
+		DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
+				 port, vcpi);
+		if (vcpi < 0)
+			return -EINVAL;
+	}
+
+	pos->dsc_enabled = enable;
+
+	return vcpi;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
+/**
  * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
  * atomic update is valid
  * @state: Pointer to the new &struct drm_dp_mst_topology_state
@@ -4151,7 +5034,13 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
 	int i, ret = 0;
 
 	for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
-		ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+		if (!mgr->mst_state)
+			continue;
+
+		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
+		if (ret)
+			break;
+		ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
 		if (ret)
 			break;
 	}
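A sketch of how a driver might combine the two exported helpers above when toggling DSC. The example_mst_dsc_precheck() wrapper is hypothetical, and the compressed PBN is assumed to have been computed by the caller from the DSC-enabled link parameters.

/* Hypothetical pre-check step: any topology that gains or loses DSC must
 * modeset all affected CRTCs before the per-port flag is flipped. */
static int example_mst_dsc_precheck(struct drm_atomic_state *state,
				    struct drm_dp_mst_topology_mgr *mgr,
				    struct drm_dp_mst_port *port,
				    int compressed_pbn, int pbn_div)
{
	int slots, ret;

	ret = drm_dp_mst_add_affected_dsc_crtcs(state, mgr);
	if (ret)
		return ret;

	/* Returns the freshly allocated VCPI slot count on success;
	 * drm_dp_mst_atomic_check() validates the allocation later. */
	slots = drm_dp_mst_atomic_enable_dsc(state, port, compressed_pbn,
					     pbn_div, true);
	if (slots < 0)
		return slots;

	return 0;
}
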
@@ -4184,9 +5073,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
 struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state,
 								    struct drm_dp_mst_topology_mgr *mgr)
 {
-	struct drm_device *dev = mgr->dev;
-
-	WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
 	return to_dp_mst_topology_state(drm_atomic_get_private_obj_state(state, &mgr->base));
 }
 EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
@@ -4212,12 +5098,20 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
 	mutex_init(&mgr->lock);
 	mutex_init(&mgr->qlock);
 	mutex_init(&mgr->payload_lock);
-	mutex_init(&mgr->destroy_connector_lock);
+	mutex_init(&mgr->delayed_destroy_lock);
+	mutex_init(&mgr->up_req_lock);
+	mutex_init(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+	mutex_init(&mgr->topology_ref_history_lock);
+#endif
 	INIT_LIST_HEAD(&mgr->tx_msg_downq);
-	INIT_LIST_HEAD(&mgr->destroy_connector_list);
+	INIT_LIST_HEAD(&mgr->destroy_port_list);
+	INIT_LIST_HEAD(&mgr->destroy_branch_device_list);
+	INIT_LIST_HEAD(&mgr->up_req_list);
 	INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work);
 	INIT_WORK(&mgr->tx_work, drm_dp_tx_work);
-	INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work);
+	INIT_WORK(&mgr->delayed_destroy_work, drm_dp_delayed_destroy_work);
+	INIT_WORK(&mgr->up_req_work, drm_dp_mst_up_req_work);
 	init_waitqueue_head(&mgr->tx_waitq);
 	mgr->dev = dev;
 	mgr->aux = aux;
@@ -4258,7 +5152,7 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 {
 	drm_dp_mst_topology_mgr_set_mst(mgr, false);
 	flush_work(&mgr->work);
-	flush_work(&mgr->destroy_connector_work);
+	cancel_work_sync(&mgr->delayed_destroy_work);
 	mutex_lock(&mgr->payload_lock);
 	kfree(mgr->payloads);
 	mgr->payloads = NULL;
@@ -4270,10 +5164,15 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
 	drm_atomic_private_obj_fini(&mgr->base);
 	mgr->funcs = NULL;
 
-	mutex_destroy(&mgr->destroy_connector_lock);
+	mutex_destroy(&mgr->delayed_destroy_lock);
 	mutex_destroy(&mgr->payload_lock);
 	mutex_destroy(&mgr->qlock);
 	mutex_destroy(&mgr->lock);
+	mutex_destroy(&mgr->up_req_lock);
+	mutex_destroy(&mgr->probe_lock);
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+	mutex_destroy(&mgr->topology_ref_history_lock);
+#endif
 }
 EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
@@ -4405,3 +5304,173 @@ static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux)
 {
 	i2c_del_adapter(&aux->ddc);
 }
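For reference, a minimal sketch of the driver-side setup that pairs with the init/destroy changes above. The example_dp structure and the literal limits are assumptions, as is the six-argument init signature at this point in the tree; everything initialized here is torn down again by drm_dp_mst_topology_mgr_destroy().

/* Hypothetical driver init: the mgr embeds the locks, lists and work
 * items set up by drm_dp_mst_topology_mgr_init() above. */
struct example_dp {
	struct drm_dp_aux aux;
	struct drm_dp_mst_topology_mgr mst_mgr;
};

static int example_mst_init(struct example_dp *dp, struct drm_device *dev,
			    int conn_base_id)
{
	/* 16-byte sideband transactions, up to 16 simultaneous payloads */
	return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, dev, &dp->aux,
					    16, 16, conn_base_id);
}
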
+
+/**
+ * drm_dp_mst_is_virtual_dpcd() - Is the given port a virtual DP Peer Device
+ * @port: The port to check
+ *
+ * A single physical MST hub object can be represented in the topology
+ * by multiple branches, with virtual ports between those branches.
+ *
+ * As of DP1.4, an MST hub with internal (virtual) ports must expose
+ * certain DPCD registers over those ports. See sections 2.6.1.1.1
+ * and 2.6.1.1.2 of the DisplayPort specification v1.4 for details.
+ *
+ * May acquire mgr->lock
+ *
+ * Returns:
+ * true if the port is a virtual DP peer device, false otherwise
+ */
+static bool drm_dp_mst_is_virtual_dpcd(struct drm_dp_mst_port *port)
+{
+	struct drm_dp_mst_port *downstream_port;
+
+	if (!port || port->dpcd_rev < DP_DPCD_REV_14)
+		return false;
+
+	/* Virtual DP Sink (Internal Display Panel) */
+	if (port->port_num >= 8)
+		return true;
+
+	/* DP-to-HDMI Protocol Converter */
+	if (port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV &&
+	    !port->mcs &&
+	    port->ldps)
+		return true;
+
+	/* DP-to-DP */
+	mutex_lock(&port->mgr->lock);
+	if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING &&
+	    port->mstb &&
+	    port->mstb->num_ports == 2) {
+		list_for_each_entry(downstream_port, &port->mstb->ports, next) {
+			if (downstream_port->pdt == DP_PEER_DEVICE_SST_SINK &&
+			    !downstream_port->input) {
+				mutex_unlock(&port->mgr->lock);
+				return true;
+			}
+		}
+	}
+	mutex_unlock(&port->mgr->lock);
+
+	return false;
+}
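As an illustration of what the virtual-DPCD distinction buys (a sketch, not part of the patch): when a port qualifies, DSC registers can be read through the port's own AUX channel rather than the physical connector's. The example_probe_dsc_caps() name is hypothetical; callers would normally rely on drm_dp_mst_dsc_aux_for_port() below instead.

/* Sketch: probe DP_DSC_SUPPORT through the port's own AUX channel if
 * the hub exposes a virtual DPCD for it. */
static int example_probe_dsc_caps(struct drm_dp_mst_port *port, u8 *dsc_cap)
{
	if (!drm_dp_mst_is_virtual_dpcd(port))
		return -ENODEV;

	if (drm_dp_dpcd_read(&port->aux, DP_DSC_SUPPORT, dsc_cap, 1) != 1)
		return -EIO;

	return 0;
}
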
+
+/**
+ * drm_dp_mst_dsc_aux_for_port() - Find the correct aux for DSC
+ * @port: The port to check. A leaf of the MST tree with an attached display.
+ *
+ * Depending on the situation, DSC may be enabled via the endpoint aux,
+ * the immediately upstream aux, or the connector's physical aux.
+ *
+ * This is both the correct aux to read DSC_CAPABILITY and the
+ * correct aux to write DSC_ENABLED.
+ *
+ * This operation can be expensive (up to four aux reads), so
+ * the caller should cache the return.
+ *
+ * Returns:
+ * NULL if DSC cannot be enabled on this port, otherwise the aux device
+ */
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
+{
+	struct drm_dp_mst_port *immediate_upstream_port;
+	struct drm_dp_mst_port *fec_port;
+	struct drm_dp_desc desc = { 0 };
+	u8 endpoint_fec;
+	u8 endpoint_dsc;
+
+	if (!port)
+		return NULL;
+
+	if (port->parent->port_parent)
+		immediate_upstream_port = port->parent->port_parent;
+	else
+		immediate_upstream_port = NULL;
+
+	fec_port = immediate_upstream_port;
+	while (fec_port) {
+		/*
+		 * Each physical link (i.e. not a virtual port) between the
+		 * output and the primary device must support FEC
+		 */
+		if (!drm_dp_mst_is_virtual_dpcd(fec_port) &&
+		    !fec_port->fec_capable)
+			return NULL;
+
+		fec_port = fec_port->parent->port_parent;
+	}
+
+	/* DP-to-DP peer device */
+	if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) {
+		u8 upstream_dsc;
+
+		if (drm_dp_dpcd_read(&port->aux,
+				     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+			return NULL;
+		if (drm_dp_dpcd_read(&port->aux,
+				     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+			return NULL;
+		if (drm_dp_dpcd_read(&immediate_upstream_port->aux,
+				     DP_DSC_SUPPORT, &upstream_dsc, 1) != 1)
+			return NULL;
+
+		/* Endpoint decompression with DP-to-DP peer device */
+		if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+		    (endpoint_fec & DP_FEC_CAPABLE) &&
+		    (upstream_dsc & 0x2) /* DSC passthrough */)
+			return &port->aux;
+
+		/* Virtual DPCD decompression with DP-to-DP peer device */
+		return &immediate_upstream_port->aux;
+	}
+
+	/* Virtual DPCD decompression with DP-to-HDMI or Virtual DP Sink */
+	if (drm_dp_mst_is_virtual_dpcd(port))
+		return &port->aux;
+
+	/*
+	 * Synaptics quirk
+	 * Applies to ports for which:
+	 * - Physical aux has Synaptics OUI
+	 * - DPv1.4 or higher
+	 * - Port is on primary branch device
+	 * - Not a VGA adapter (DP_DWN_STRM_PORT_TYPE_ANALOG)
+	 */
+	if (drm_dp_read_desc(port->mgr->aux, &desc, true))
+		return NULL;
+
+	if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+	    port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
+	    port->parent == port->mgr->mst_primary) {
+		u8 downstreamport;
+
+		if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
+				     &downstreamport, 1) < 0)
+			return NULL;
+
+		if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
+		    ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
+		     != DP_DWN_STRM_PORT_TYPE_ANALOG))
+			return port->mgr->aux;
+	}
+
+	/*
+	 * The check below verifies if the MST sink
+	 * connected to the GPU is capable of DSC -
+	 * therefore the endpoint needs to be
+	 * both DSC and FEC capable.
+	 */
+	if (drm_dp_dpcd_read(&port->aux,
+			     DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1)
+		return NULL;
+	if (drm_dp_dpcd_read(&port->aux,
+			     DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1)
+		return NULL;
+	if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) &&
+	    (endpoint_fec & DP_FEC_CAPABLE))
+		return &port->aux;
+
+	return NULL;
+}
+EXPORT_SYMBOL(drm_dp_mst_dsc_aux_for_port);
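Finally, a usage sketch showing why the returned aux matters: the same aux that reported DSC_CAPABILITY is the one DSC_ENABLED must be written through. The example_set_dsc_enable() name is hypothetical; drm_dp_dpcd_writeb() and the DP_DSC_ENABLE / DP_DECOMPRESSION_EN definitions are from the existing DP helpers.

/* Sketch: enable or disable decompression through whichever AUX
 * drm_dp_mst_dsc_aux_for_port() picked for this port. */
static int example_set_dsc_enable(struct drm_dp_mst_port *port, bool enable)
{
	struct drm_dp_aux *aux = drm_dp_mst_dsc_aux_for_port(port);
	ssize_t ret;

	if (!aux)
		return -ENODEV;

	ret = drm_dp_dpcd_writeb(aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	return ret < 0 ? ret : 0;
}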