Diffstat (limited to 'drivers/gpu/drm/drm_dp_mst_topology.c')
-rw-r--r--   drivers/gpu/drm/drm_dp_mst_topology.c   1153
1 file changed, 900 insertions(+), 253 deletions(-)
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 529414556962..dc7ac0c60547 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -33,6 +33,7 @@
#include <drm/drm_fixed.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_probe_helper.h>
/**
* DOC: dp mst helper
@@ -45,7 +46,7 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
char *buf);
static int test_calc_pbn_mode(void);
-static void drm_dp_put_port(struct drm_dp_mst_port *port);
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
int id,
@@ -66,6 +67,64 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
static int drm_dp_mst_register_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_unregister_i2c_bus(struct drm_dp_aux *aux);
static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
+
+#define DP_STR(x) [DP_ ## x] = #x
+
+static const char *drm_dp_mst_req_type_str(u8 req_type)
+{
+ static const char * const req_type_str[] = {
+ DP_STR(GET_MSG_TRANSACTION_VERSION),
+ DP_STR(LINK_ADDRESS),
+ DP_STR(CONNECTION_STATUS_NOTIFY),
+ DP_STR(ENUM_PATH_RESOURCES),
+ DP_STR(ALLOCATE_PAYLOAD),
+ DP_STR(QUERY_PAYLOAD),
+ DP_STR(RESOURCE_STATUS_NOTIFY),
+ DP_STR(CLEAR_PAYLOAD_ID_TABLE),
+ DP_STR(REMOTE_DPCD_READ),
+ DP_STR(REMOTE_DPCD_WRITE),
+ DP_STR(REMOTE_I2C_READ),
+ DP_STR(REMOTE_I2C_WRITE),
+ DP_STR(POWER_UP_PHY),
+ DP_STR(POWER_DOWN_PHY),
+ DP_STR(SINK_EVENT_NOTIFY),
+ DP_STR(QUERY_STREAM_ENC_STATUS),
+ };
+
+ if (req_type >= ARRAY_SIZE(req_type_str) ||
+ !req_type_str[req_type])
+ return "unknown";
+
+ return req_type_str[req_type];
+}
+
+#undef DP_STR
+#define DP_STR(x) [DP_NAK_ ## x] = #x
+
+static const char *drm_dp_mst_nak_reason_str(u8 nak_reason)
+{
+ static const char * const nak_reason_str[] = {
+ DP_STR(WRITE_FAILURE),
+ DP_STR(INVALID_READ),
+ DP_STR(CRC_FAILURE),
+ DP_STR(BAD_PARAM),
+ DP_STR(DEFER),
+ DP_STR(LINK_FAILURE),
+ DP_STR(NO_RESOURCES),
+ DP_STR(DPCD_FAIL),
+ DP_STR(I2C_NAK),
+ DP_STR(ALLOCATE_FAIL),
+ };
+
+ if (nak_reason >= ARRAY_SIZE(nak_reason_str) ||
+ !nak_reason_str[nak_reason])
+ return "unknown";
+
+ return nak_reason_str[nak_reason];
+}
+
+#undef DP_STR
+
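The DP_STR tables above rely on C designated array initializers keyed by the protocol value, which is why both lookup functions bounds-check and NULL-check before returning. A minimal standalone sketch of the same pattern; the FOO_* names are illustrative and not part of this patch:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum { FOO_READ = 0x01, FOO_WRITE = 0x02 };

#define FOO_STR(x) [FOO_ ## x] = #x

static const char *foo_type_str(unsigned char type)
{
	static const char * const strs[] = {
		FOO_STR(READ),
		FOO_STR(WRITE),
	};

	/* sparse tables leave NULL holes, so check bounds and entry */
	if (type >= ARRAY_SIZE(strs) || !strs[type])
		return "unknown";
	return strs[type];
}

int main(void)
{
	printf("%s\n", foo_type_str(FOO_WRITE)); /* prints "WRITE" */
	return 0;
}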
/* sideband msg handling */
static u8 drm_dp_msg_header_crc4(const uint8_t *data, size_t num_nibbles)
{
@@ -567,7 +626,7 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
msg->reply_type = (raw->msg[0] & 0x80) >> 7;
msg->req_type = (raw->msg[0] & 0x7f);
- if (msg->reply_type) {
+ if (msg->reply_type == DP_SIDEBAND_REPLY_NAK) {
memcpy(msg->u.nak.guid, &raw->msg[1], 16);
msg->u.nak.reason = raw->msg[17];
msg->u.nak.nak_data = raw->msg[18];
@@ -593,7 +652,8 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
case DP_POWER_UP_PHY:
return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
default:
- DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
+ DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
+ drm_dp_mst_req_type_str(msg->req_type));
return false;
}
}
@@ -660,7 +720,8 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
case DP_RESOURCE_STATUS_NOTIFY:
return drm_dp_sideband_parse_resource_status_notify(raw, msg);
default:
- DRM_ERROR("Got unknown request 0x%02x\n", msg->req_type);
+ DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
+ drm_dp_mst_req_type_str(msg->req_type));
return false;
}
}
@@ -849,46 +910,212 @@ static struct drm_dp_mst_branch *drm_dp_add_mst_branch_device(u8 lct, u8 *rad)
if (lct > 1)
memcpy(mstb->rad, rad, lct / 2);
INIT_LIST_HEAD(&mstb->ports);
- kref_init(&mstb->kref);
+ kref_init(&mstb->topology_kref);
+ kref_init(&mstb->malloc_kref);
return mstb;
}
-static void drm_dp_free_mst_port(struct kref *kref);
-
static void drm_dp_free_mst_branch_device(struct kref *kref)
{
- struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
- if (mstb->port_parent) {
- if (list_empty(&mstb->port_parent->next))
- kref_put(&mstb->port_parent->kref, drm_dp_free_mst_port);
- }
+ struct drm_dp_mst_branch *mstb =
+ container_of(kref, struct drm_dp_mst_branch, malloc_kref);
+
+ if (mstb->port_parent)
+ drm_dp_mst_put_port_malloc(mstb->port_parent);
+
kfree(mstb);
}
+/**
+ * DOC: Branch device and port refcounting
+ *
+ * Topology refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * The refcounting schemes for &struct drm_dp_mst_branch and &struct
+ * drm_dp_mst_port are somewhat unusual. Both ports and branch devices have
+ * two different kinds of refcounts: topology refcounts, and malloc refcounts.
+ *
+ * Topology refcounts are not exposed to drivers, and are handled internally
+ * by the DP MST helpers. The helpers use them in order to prevent the
+ * in-memory topology state from being changed in the middle of critical
+ * operations like changing the internal state of payload allocations. This
+ * means each branch and port will be considered to be connected to the rest
+ * of the topology until its topology refcount reaches zero. Additionally,
+ * for ports this means that their associated &struct drm_connector will stay
+ * registered with userspace until the port's topology refcount reaches 0.
+ *
+ * Malloc refcount overview
+ * ~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Malloc references are used to keep a &struct drm_dp_mst_port or &struct
+ * drm_dp_mst_branch allocated even after all of its topology references have
+ * been dropped, so that the driver or MST helpers can safely access each
+ * branch's last known state before it was disconnected from the topology.
+ * When the malloc refcount of a port or branch reaches 0, the memory
+ * allocation containing the &struct drm_dp_mst_branch or &struct
+ * drm_dp_mst_port respectively will be freed.
+ *
+ * For &struct drm_dp_mst_branch, malloc refcounts are not currently exposed
+ * to drivers. As of writing this documentation, there are no drivers that
+ * have a usecase for accessing &struct drm_dp_mst_branch outside of the MST
+ * helpers. Exposing this API to drivers in a race-free manner would take more
+ * tweaking of the refcounting scheme, however patches are welcome provided
+ * there is a legitimate driver usecase for this.
+ *
+ * Refcount relationships in a topology
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
+ * Let's take a look at why the relationship between topology and malloc
+ * refcounts is designed the way it is.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-1.dot
+ *
+ * An example of topology and malloc refs in a DP MST topology with two
+ * active payloads. Topology refcount increments are indicated by solid
+ * lines, and malloc refcount increments are indicated by dashed lines.
+ * Each starts from the branch which incremented the refcount, and ends at
+ * the branch to which the refcount belongs, i.e. the arrow points the
+ * same way as the C pointers used to reference a structure.
+ *
+ * As you can see in the above figure, every branch increments the topology
+ * refcount of its children, and increments the malloc refcount of its
+ * parent. Additionally, every payload increments the malloc refcount of its
+ * assigned port by 1.
+ *
+ * So, what would happen if MSTB #3 from the above figure was unplugged from
+ * the system, but the driver hadn't yet removed payload #2 from port #3? The
+ * topology would start to look like the figure below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-2.dot
+ *
+ * Ports and branch devices which have been released from memory are
+ * colored grey, and references which have been removed are colored red.
+ *
+ * Whenever a port or branch device's topology refcount reaches zero, it will
+ * decrement the topology refcounts of all its children, the malloc refcount
+ * of its parent, and finally its own malloc refcount. For MSTB #4 and port
+ * #4, this means they both have been disconnected from the topology and freed
+ * from memory. But, because payload #2 is still holding a reference to port
+ * #3, port #3 is removed from the topology but its &struct drm_dp_mst_port
+ * is still accessible from memory. This also means port #3 has not yet
+ * decremented the malloc refcount of MSTB #3, so its &struct
+ * drm_dp_mst_branch will also stay allocated in memory until port #3's
+ * malloc refcount reaches 0.
+ *
+ * This relationship is necessary because in order to release payload #2, we
+ * need to be able to figure out the last relative of port #3 that's still
+ * connected to the topology. In this case, we would travel up the topology as
+ * shown below.
+ *
+ * .. kernel-figure:: dp-mst/topology-figure-3.dot
+ *
+ * And finally, remove payload #2 by communicating with port #2 through
+ * sideband transactions.
+ */
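To make the driver-facing side of this scheme concrete, here is a hedged sketch of an add_connector/destroy pair that pins a port's memory for the connector's lifetime. Only drm_dp_mst_get_port_malloc() and drm_dp_mst_put_port_malloc() come from this patch; the foo_* structure and callbacks are hypothetical:

#include <drm/drm_dp_mst_helper.h>
#include <linux/slab.h>

/* hypothetical driver connector wrapping an MST port */
struct foo_mst_connector {
	struct drm_connector base;
	struct drm_dp_mst_port *port;
};

static struct drm_connector *
foo_mst_add_connector(struct drm_dp_mst_topology_mgr *mgr,
		      struct drm_dp_mst_port *port, const char *path)
{
	struct foo_mst_connector *conn = kzalloc(sizeof(*conn), GFP_KERNEL);

	if (!conn)
		return NULL;

	/*
	 * Pin the port's memory allocation for the connector's lifetime.
	 * The port may drop out of the topology at any point, but the
	 * structure stays valid until the matching put below.
	 */
	conn->port = port;
	drm_dp_mst_get_port_malloc(port);

	/* ... drm_connector_init(), property setup, etc. ... */
	return &conn->base;
}

static void foo_mst_connector_destroy(struct drm_connector *connector)
{
	struct foo_mst_connector *conn =
		container_of(connector, struct foo_mst_connector, base);

	drm_dp_mst_put_port_malloc(conn->port);
	drm_connector_cleanup(connector);
	kfree(conn);
}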
+
+/**
+ * drm_dp_mst_get_mstb_malloc() - Increment the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_put_mstb_malloc()
+ */
+static void
+drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+ kref_get(&mstb->malloc_kref);
+ DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
+}
+
+/**
+ * drm_dp_mst_put_mstb_malloc() - Decrement the malloc refcount of a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_branch.malloc_kref. When
+ * &drm_dp_mst_branch.malloc_kref reaches 0, the memory allocation for @mstb
+ * will be released and @mstb may no longer be used.
+ *
+ * See also: drm_dp_mst_get_mstb_malloc()
+ */
+static void
+drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
+{
+ DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
+ kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
+}
+
+static void drm_dp_free_mst_port(struct kref *kref)
+{
+ struct drm_dp_mst_port *port =
+ container_of(kref, struct drm_dp_mst_port, malloc_kref);
+
+ drm_dp_mst_put_mstb_malloc(port->parent);
+ kfree(port);
+}
+
+/**
+ * drm_dp_mst_get_port_malloc() - Increment the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to increment the malloc refcount of
+ *
+ * Increments &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * Because @port could potentially be freed at any time by the DP MST helpers
+ * if &drm_dp_mst_port.malloc_kref reaches 0, including during a call to this
+ * function, drivers that wish to make use of &struct drm_dp_mst_port should
+ * ensure that they grab at least one main malloc reference to their MST ports
+ * in &drm_dp_mst_topology_cbs.add_connector. This callback is called before
+ * there is any chance for &drm_dp_mst_port.malloc_kref to reach 0.
+ *
+ * See also: drm_dp_mst_put_port_malloc()
+ */
+void
+drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
+{
+ kref_get(&port->malloc_kref);
+ DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
+}
+EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
+
+/**
+ * drm_dp_mst_put_port_malloc() - Decrement the malloc refcount of an MST port
+ * @port: The &struct drm_dp_mst_port to decrement the malloc refcount of
+ *
+ * Decrements &drm_dp_mst_port.malloc_kref. When &drm_dp_mst_port.malloc_kref
+ * reaches 0, the memory allocation for @port will be released and @port may
+ * no longer be used.
+ *
+ * See also: drm_dp_mst_get_port_malloc()
+ */
+void
+drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
+{
+ DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
+ kref_put(&port->malloc_kref, drm_dp_free_mst_port);
+}
+EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
+
static void drm_dp_destroy_mst_branch_device(struct kref *kref)
{
- struct drm_dp_mst_branch *mstb = container_of(kref, struct drm_dp_mst_branch, kref);
+ struct drm_dp_mst_branch *mstb =
+ container_of(kref, struct drm_dp_mst_branch, topology_kref);
+ struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
struct drm_dp_mst_port *port, *tmp;
bool wake_tx = false;
- /*
- * init kref again to be used by ports to remove mst branch when it is
- * not needed anymore
- */
- kref_init(kref);
-
- if (mstb->port_parent && list_empty(&mstb->port_parent->next))
- kref_get(&mstb->port_parent->kref);
-
- /*
- * destroy all ports - don't need lock
- * as there are no more references to the mst branch
- * device at this point.
- */
+ mutex_lock(&mgr->lock);
list_for_each_entry_safe(port, tmp, &mstb->ports, next) {
list_del(&port->next);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
}
+ mutex_unlock(&mgr->lock);
/* drop any tx slots msg */
mutex_lock(&mstb->mgr->qlock);
@@ -907,14 +1134,83 @@ static void drm_dp_destroy_mst_branch_device(struct kref *kref)
if (wake_tx)
wake_up_all(&mstb->mgr->tx_waitq);
- kref_put(kref, drm_dp_free_mst_branch_device);
+ drm_dp_mst_put_mstb_malloc(mstb);
}
-static void drm_dp_put_mst_branch_device(struct drm_dp_mst_branch *mstb)
+/**
+ * drm_dp_mst_topology_try_get_mstb() - Increment the topology refcount of a
+ * branch device unless it's zero
+ * @mstb: &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @mstb, if it hasn't yet been
+ * removed from the topology (i.e. &drm_dp_mst_branch.topology_kref has
+ * reached 0). Holding a topology reference implies that a malloc reference
+ * will be held to @mstb as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @mstb. If you already have a topology reference to @mstb, you
+ * should use drm_dp_mst_topology_get_mstb() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @mstb is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
{
- kref_put(&mstb->kref, drm_dp_destroy_mst_branch_device);
+ int ret = kref_get_unless_zero(&mstb->topology_kref);
+
+ if (ret)
+ DRM_DEBUG("mstb %p (%d)\n", mstb,
+ kref_read(&mstb->topology_kref));
+
+ return ret;
}
+/**
+ * drm_dp_mst_topology_get_mstb() - Increment the topology refcount of a
+ * branch device
+ * @mstb: The &struct drm_dp_mst_branch to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_branch.topology_kref without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @mstb. Otherwise, drm_dp_mst_topology_try_get_mstb() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_put_mstb()
+ */
+static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
+{
+ WARN_ON(kref_read(&mstb->topology_kref) == 0);
+ kref_get(&mstb->topology_kref);
+ DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+}
+
+/**
+ * drm_dp_mst_topology_put_mstb() - release a topology reference to a branch
+ * device
+ * @mstb: The &struct drm_dp_mst_branch to release the topology reference from
+ *
+ * Releases a topology reference from @mstb by decrementing
+ * &drm_dp_mst_branch.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_mstb()
+ * drm_dp_mst_topology_get_mstb()
+ */
+static void
+drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
+{
+ DRM_DEBUG("mstb %p (%d)\n",
+ mstb, kref_read(&mstb->topology_kref) - 1);
+ kref_put(&mstb->topology_kref, drm_dp_destroy_mst_branch_device);
+}
static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
{
@@ -929,19 +1225,18 @@ static void drm_dp_port_teardown_pdt(struct drm_dp_mst_port *port, int old_pdt)
case DP_PEER_DEVICE_MST_BRANCHING:
mstb = port->mstb;
port->mstb = NULL;
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
break;
}
}
static void drm_dp_destroy_port(struct kref *kref)
{
- struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
+ struct drm_dp_mst_port *port =
+ container_of(kref, struct drm_dp_mst_port, topology_kref);
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
if (!port->input) {
- port->vcpi.num_slots = 0;
-
kfree(port->cached_edid);
/*
@@ -955,7 +1250,6 @@ static void drm_dp_destroy_port(struct kref *kref)
* from an EDID retrieval */
mutex_lock(&mgr->destroy_connector_lock);
- kref_get(&port->parent->kref);
list_add(&port->next, &mgr->destroy_connector_list);
mutex_unlock(&mgr->destroy_connector_lock);
schedule_work(&mgr->destroy_connector_work);
@@ -966,25 +1260,95 @@ static void drm_dp_destroy_port(struct kref *kref)
drm_dp_port_teardown_pdt(port, port->pdt);
port->pdt = DP_PEER_DEVICE_NONE;
}
- kfree(port);
+ drm_dp_mst_put_port_malloc(port);
}
-static void drm_dp_put_port(struct drm_dp_mst_port *port)
+/**
+ * drm_dp_mst_topology_try_get_port() - Increment the topology refcount of a
+ * port unless it's zero
+ * @port: &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Attempts to grab a topology reference to @port, if it hasn't yet been
+ * removed from the topology (i.e. &drm_dp_mst_port.topology_kref has reached
+ * 0). Holding a topology reference implies that a malloc reference will be
+ * held to @port as long as the user holds the topology reference.
+ *
+ * Care should be taken to ensure that the user has at least one malloc
+ * reference to @port. If you already have a topology reference to @port, you
+ * should use drm_dp_mst_topology_get_port() instead.
+ *
+ * See also:
+ * drm_dp_mst_topology_get_port()
+ * drm_dp_mst_topology_put_port()
+ *
+ * Returns:
+ * * 1: A topology reference was grabbed successfully
+ * * 0: @port is no longer in the topology, no reference was grabbed
+ */
+static int __must_check
+drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
{
- kref_put(&port->kref, drm_dp_destroy_port);
+ int ret = kref_get_unless_zero(&port->topology_kref);
+
+ if (ret)
+ DRM_DEBUG("port %p (%d)\n", port,
+ kref_read(&port->topology_kref));
+
+ return ret;
}
-static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_branch *to_find)
+/**
+ * drm_dp_mst_topology_get_port() - Increment the topology refcount of a port
+ * @port: The &struct drm_dp_mst_port to increment the topology refcount of
+ *
+ * Increments &drm_dp_mst_port.topology_kref without checking whether or
+ * not it's already reached 0. This is only valid to use in scenarios where
+ * you are already guaranteed to have at least one active topology reference
+ * to @port. Otherwise, drm_dp_mst_topology_try_get_port() must be used.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_put_port()
+ */
+static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
+{
+ WARN_ON(kref_read(&port->topology_kref) == 0);
+ kref_get(&port->topology_kref);
+ DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+}
+
+/**
+ * drm_dp_mst_topology_put_port() - release a topology reference to a port
+ * @port: The &struct drm_dp_mst_port to release the topology reference from
+ *
+ * Releases a topology reference from @port by decrementing
+ * &drm_dp_mst_port.topology_kref.
+ *
+ * See also:
+ * drm_dp_mst_topology_try_get_port()
+ * drm_dp_mst_topology_get_port()
+ */
+static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
+{
+ DRM_DEBUG("port %p (%d)\n",
+ port, kref_read(&port->topology_kref) - 1);
+ kref_put(&port->topology_kref, drm_dp_destroy_port);
+}
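The try-get helpers above are instances of the standard kref_get_unless_zero() lookup pattern: hold the lock protecting the lookup structure, attempt the get only then, and treat failure as "the object is already dying". A standalone sketch of the pattern, with illustrative foo_* names:

#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct foo_obj {
	struct kref ref;
	struct list_head node;
};

static DEFINE_MUTEX(foo_lock);
static LIST_HEAD(foo_list);

static void foo_release(struct kref *ref)
{
	struct foo_obj *obj = container_of(ref, struct foo_obj, ref);

	kfree(obj);
}

/*
 * Look up an object and take a reference, but only if its refcount
 * hasn't already hit zero - i.e. only if it hasn't started dying.
 */
static struct foo_obj *foo_find_get(struct foo_obj *needle)
{
	struct foo_obj *obj, *found = NULL;

	mutex_lock(&foo_lock);
	list_for_each_entry(obj, &foo_list, node) {
		if (obj == needle && kref_get_unless_zero(&obj->ref)) {
			found = obj;
			break;
		}
	}
	mutex_unlock(&foo_lock);

	return found; /* caller must kref_put(&found->ref, foo_release) */
}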
+
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated_locked(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_branch *to_find)
{
struct drm_dp_mst_port *port;
struct drm_dp_mst_branch *rmstb;
- if (to_find == mstb) {
- kref_get(&mstb->kref);
+
+ if (to_find == mstb)
return mstb;
- }
+
list_for_each_entry(port, &mstb->ports, next) {
if (port->mstb) {
- rmstb = drm_dp_mst_get_validated_mstb_ref_locked(port->mstb, to_find);
+ rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+ port->mstb, to_find);
if (rmstb)
return rmstb;
}
@@ -992,27 +1356,37 @@ static struct drm_dp_mst_branch *drm_dp_mst_get_validated_mstb_ref_locked(struct
return NULL;
}
-static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_branch *mstb)
+static struct drm_dp_mst_branch *
+drm_dp_mst_topology_get_mstb_validated(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb)
{
struct drm_dp_mst_branch *rmstb = NULL;
+
mutex_lock(&mgr->lock);
- if (mgr->mst_primary)
- rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb);
+ if (mgr->mst_primary) {
+ rmstb = drm_dp_mst_topology_get_mstb_validated_locked(
+ mgr->mst_primary, mstb);
+
+ if (rmstb && !drm_dp_mst_topology_try_get_mstb(rmstb))
+ rmstb = NULL;
+ }
mutex_unlock(&mgr->lock);
return rmstb;
}
-static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *to_find)
+static struct drm_dp_mst_port *
+drm_dp_mst_topology_get_port_validated_locked(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *to_find)
{
struct drm_dp_mst_port *port, *mport;
list_for_each_entry(port, &mstb->ports, next) {
- if (port == to_find) {
- kref_get(&port->kref);
+ if (port == to_find)
return port;
- }
+
if (port->mstb) {
- mport = drm_dp_mst_get_port_ref_locked(port->mstb, to_find);
+ mport = drm_dp_mst_topology_get_port_validated_locked(
+ port->mstb, to_find);
if (mport)
return mport;
}
@@ -1020,12 +1394,20 @@ static struct drm_dp_mst_port *drm_dp_mst_get_port_ref_locked(struct drm_dp_mst_
return NULL;
}
-static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+static struct drm_dp_mst_port *
+drm_dp_mst_topology_get_port_validated(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
struct drm_dp_mst_port *rport = NULL;
+
mutex_lock(&mgr->lock);
- if (mgr->mst_primary)
- rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port);
+ if (mgr->mst_primary) {
+ rport = drm_dp_mst_topology_get_port_validated_locked(
+ mgr->mst_primary, port);
+
+ if (rport && !drm_dp_mst_topology_try_get_port(rport))
+ rport = NULL;
+ }
mutex_unlock(&mgr->lock);
return rport;
}
@@ -1033,11 +1415,12 @@ static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_t
static struct drm_dp_mst_port *drm_dp_get_port(struct drm_dp_mst_branch *mstb, u8 port_num)
{
struct drm_dp_mst_port *port;
+ int ret;
list_for_each_entry(port, &mstb->ports, next) {
if (port->port_num == port_num) {
- kref_get(&port->kref);
- return port;
+ ret = drm_dp_mst_topology_try_get_port(port);
+ return ret ? port : NULL;
}
}
@@ -1086,6 +1469,11 @@ static bool drm_dp_port_setup_pdt(struct drm_dp_mst_port *port)
if (port->mstb) {
port->mstb->mgr = port->mgr;
port->mstb->port_parent = port;
+ /*
+ * Make sure this port's memory allocation stays
+ * around until its child MSTB releases it
+ */
+ drm_dp_mst_get_port_malloc(port);
send_link = true;
}
@@ -1146,17 +1534,26 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
bool created = false;
int old_pdt = 0;
int old_ddps = 0;
+
port = drm_dp_get_port(mstb, port_msg->port_number);
if (!port) {
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return;
- kref_init(&port->kref);
+ kref_init(&port->topology_kref);
+ kref_init(&port->malloc_kref);
port->parent = mstb;
port->port_num = port_msg->port_number;
port->mgr = mstb->mgr;
port->aux.name = "DPMST";
port->aux.dev = dev->dev;
+
+ /*
+ * Make sure the memory allocation for our parent branch stays
+ * around until our own memory allocation is released
+ */
+ drm_dp_mst_get_mstb_malloc(mstb);
+
created = true;
} else {
old_pdt = port->pdt;
@@ -1176,18 +1573,20 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
for this list */
if (created) {
mutex_lock(&mstb->mgr->lock);
- kref_get(&port->kref);
+ drm_dp_mst_topology_get_port(port);
list_add(&port->next, &mstb->ports);
mutex_unlock(&mstb->mgr->lock);
}
if (old_ddps != port->ddps) {
if (port->ddps) {
- if (!port->input)
- drm_dp_send_enum_path_resources(mstb->mgr, mstb, port);
+ if (!port->input) {
+ drm_dp_send_enum_path_resources(mstb->mgr,
+ mstb, port);
+ }
} else {
port->available_pbn = 0;
- }
+ }
}
if (old_pdt != port->pdt && !port->input) {
@@ -1201,21 +1600,25 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
if (created && !port->input) {
char proppath[255];
- build_mst_prop_path(mstb, port->port_num, proppath, sizeof(proppath));
- port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath);
+ build_mst_prop_path(mstb, port->port_num, proppath,
+ sizeof(proppath));
+ port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr,
+ port,
+ proppath);
if (!port->connector) {
/* remove it from the port list */
mutex_lock(&mstb->mgr->lock);
list_del(&port->next);
mutex_unlock(&mstb->mgr->lock);
/* drop port list reference */
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
goto out;
}
if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV ||
port->pdt == DP_PEER_DEVICE_SST_SINK) &&
port->port_num >= DP_MST_LOGICAL_PORT_0) {
- port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ port->cached_edid = drm_get_edid(port->connector,
+ &port->aux.ddc);
drm_connector_set_tile_property(port->connector);
}
(*mstb->mgr->cbs->register_connector)(port->connector);
@@ -1223,7 +1626,7 @@ static void drm_dp_add_port(struct drm_dp_mst_branch *mstb,
out:
/* put reference to this port */
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
}
static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
@@ -1258,7 +1661,7 @@ static void drm_dp_update_port(struct drm_dp_mst_branch *mstb,
dowork = true;
}
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
if (dowork)
queue_work(system_long_wq, &mstb->mgr->work);
@@ -1269,7 +1672,7 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
{
struct drm_dp_mst_branch *mstb;
struct drm_dp_mst_port *port;
- int i;
+ int i, ret;
/* find the port by iterating down */
mutex_lock(&mgr->lock);
@@ -1294,7 +1697,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
}
}
}
- kref_get(&mstb->kref);
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
+ if (!ret)
+ mstb = NULL;
out:
mutex_unlock(&mgr->lock);
return mstb;
@@ -1324,19 +1729,22 @@ static struct drm_dp_mst_branch *get_mst_branch_device_by_guid_helper(
return NULL;
}
-static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device_by_guid(
- struct drm_dp_mst_topology_mgr *mgr,
- uint8_t *guid)
+static struct drm_dp_mst_branch *
+drm_dp_get_mst_branch_device_by_guid(struct drm_dp_mst_topology_mgr *mgr,
+ uint8_t *guid)
{
struct drm_dp_mst_branch *mstb;
+ int ret;
/* find the port by iterating down */
mutex_lock(&mgr->lock);
mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid);
-
- if (mstb)
- kref_get(&mstb->kref);
+ if (mstb) {
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
+ if (!ret)
+ mstb = NULL;
+ }
mutex_unlock(&mgr->lock);
return mstb;
@@ -1361,10 +1769,11 @@ static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *m
drm_dp_send_enum_path_resources(mgr, mstb, port);
if (port->mstb) {
- mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb);
+ mstb_child = drm_dp_mst_topology_get_mstb_validated(
+ mgr, port->mstb);
if (mstb_child) {
drm_dp_check_and_send_link_address(mgr, mstb_child);
- drm_dp_put_mst_branch_device(mstb_child);
+ drm_dp_mst_topology_put_mstb(mstb_child);
}
}
}
@@ -1374,16 +1783,19 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work);
struct drm_dp_mst_branch *mstb;
+ int ret;
mutex_lock(&mgr->lock);
mstb = mgr->mst_primary;
if (mstb) {
- kref_get(&mstb->kref);
+ ret = drm_dp_mst_topology_try_get_mstb(mstb);
+ if (!ret)
+ mstb = NULL;
}
mutex_unlock(&mgr->lock);
if (mstb) {
drm_dp_check_and_send_link_address(mgr, mstb);
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
}
}
@@ -1617,9 +2029,9 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
if (ret > 0) {
int i;
- if (txmsg->reply.reply_type == 1)
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
DRM_DEBUG_KMS("link address nak received\n");
- else {
+ } else {
DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports);
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n", i,
@@ -1639,7 +2051,7 @@ static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
}
- (*mgr->cbs->hotplug)(mgr);
+ drm_kms_helper_hotplug_event(mgr->dev);
}
} else {
mstb->link_address_sent = false;
@@ -1668,9 +2080,9 @@ static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
- if (txmsg->reply.reply_type == 1)
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
DRM_DEBUG_KMS("enum path resources nak received\n");
- else {
+ } else {
if (port->port_num != txmsg->reply.u.path_resources.port_number)
DRM_ERROR("got incorrect port in response\n");
DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number,
@@ -1694,22 +2106,40 @@ static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm
return drm_dp_get_last_connected_port_to_mstb(mstb->port_parent->parent);
}
-static struct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_branch *mstb,
- int *port_num)
+/*
+ * Searches upwards in the topology starting from mstb to try to find the
+ * closest available parent of mstb that's still connected to the rest of the
+ * topology. This can be used in order to perform operations like releasing
+ * payloads, where the branch device which owned the payload may no longer be
+ * around and thus would require that the payload on the last living relative
+ * be freed instead.
+ */
+static struct drm_dp_mst_branch *
+drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_branch *mstb,
+ int *port_num)
{
struct drm_dp_mst_branch *rmstb = NULL;
struct drm_dp_mst_port *found_port;
+
mutex_lock(&mgr->lock);
- if (mgr->mst_primary) {
+ if (!mgr->mst_primary)
+ goto out;
+
+ do {
found_port = drm_dp_get_last_connected_port_to_mstb(mstb);
+ if (!found_port)
+ break;
- if (found_port) {
+ if (drm_dp_mst_topology_try_get_mstb(found_port->parent)) {
rmstb = found_port->parent;
- kref_get(&rmstb->kref);
*port_num = found_port->port_num;
+ } else {
+ /* Search again, starting from this parent */
+ mstb = found_port->parent;
}
- }
+ } while (!rmstb);
+out:
mutex_unlock(&mgr->lock);
return rmstb;
}
@@ -1725,19 +2155,15 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port)
- return -EINVAL;
-
port_num = port->port_num;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb) {
- mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num);
+ mstb = drm_dp_get_last_connected_port_and_mstb(mgr,
+ port->parent,
+ &port_num);
- if (!mstb) {
- drm_dp_put_port(port);
+ if (!mstb)
return -EINVAL;
- }
}
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -1756,17 +2182,24 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
drm_dp_queue_down_tx(mgr, txmsg);
+ /*
+ * FIXME: there is a small chance that between getting the last
+ * connected mstb and sending the payload message, the last connected
+ * mstb could also be removed from the topology. In the future, this
+ * needs to be fixed by restarting the
+ * drm_dp_get_last_connected_port_and_mstb() search in the event of a
+ * timeout if the topology is still connected to the system.
+ */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
- if (txmsg->reply.reply_type == 1) {
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
ret = -EINVAL;
- } else
+ else
ret = 0;
}
kfree(txmsg);
fail_put:
- drm_dp_put_mst_branch_device(mstb);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
@@ -1776,13 +2209,13 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg;
int len, ret;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return -EINVAL;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg) {
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return -ENOMEM;
}
@@ -1792,13 +2225,13 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
if (ret > 0) {
- if (txmsg->reply.reply_type == 1)
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
ret = -EINVAL;
else
ret = 0;
}
kfree(txmsg);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return ret;
}
@@ -1838,7 +2271,7 @@ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_payload *payload)
{
DRM_DEBUG_KMS("\n");
- /* its okay for these to fail */
+ /* it's okay for these to fail */
if (port) {
drm_dp_payload_send_msg(mgr, port, id, 0);
}
@@ -1871,72 +2304,93 @@ static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr,
*/
int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
{
- int i, j;
- int cur_slots = 1;
struct drm_dp_payload req_payload;
struct drm_dp_mst_port *port;
+ int i, j;
+ int cur_slots = 1;
mutex_lock(&mgr->payload_lock);
for (i = 0; i < mgr->max_payloads; i++) {
+ struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
+ struct drm_dp_payload *payload = &mgr->payloads[i];
+ bool put_port = false;
+
/* solve the current payloads - compare to the hw ones
- update the hw view */
req_payload.start_slot = cur_slots;
- if (mgr->proposed_vcpis[i]) {
- port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port) {
- mutex_unlock(&mgr->payload_lock);
- return -EINVAL;
+ if (vcpi) {
+ port = container_of(vcpi, struct drm_dp_mst_port,
+ vcpi);
+
+ /* Validated ports don't matter if we're releasing
+ * VCPI
+ */
+ if (vcpi->num_slots) {
+ port = drm_dp_mst_topology_get_port_validated(
+ mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
+ put_port = true;
}
- req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
- req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
+
+ req_payload.num_slots = vcpi->num_slots;
+ req_payload.vcpi = vcpi->vcpi;
} else {
port = NULL;
req_payload.num_slots = 0;
}
- if (mgr->payloads[i].start_slot != req_payload.start_slot) {
- mgr->payloads[i].start_slot = req_payload.start_slot;
- }
+ payload->start_slot = req_payload.start_slot;
/* work out what is required to happen with this payload */
- if (mgr->payloads[i].num_slots != req_payload.num_slots) {
+ if (payload->num_slots != req_payload.num_slots) {
/* need to push an update for this payload */
if (req_payload.num_slots) {
- drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload);
- mgr->payloads[i].num_slots = req_payload.num_slots;
- mgr->payloads[i].vcpi = req_payload.vcpi;
- } else if (mgr->payloads[i].num_slots) {
- mgr->payloads[i].num_slots = 0;
- drm_dp_destroy_payload_step1(mgr, port, mgr->payloads[i].vcpi, &mgr->payloads[i]);
- req_payload.payload_state = mgr->payloads[i].payload_state;
- mgr->payloads[i].start_slot = 0;
+ drm_dp_create_payload_step1(mgr, vcpi->vcpi,
+ &req_payload);
+ payload->num_slots = req_payload.num_slots;
+ payload->vcpi = req_payload.vcpi;
+
+ } else if (payload->num_slots) {
+ payload->num_slots = 0;
+ drm_dp_destroy_payload_step1(mgr, port,
+ payload->vcpi,
+ payload);
+ req_payload.payload_state =
+ payload->payload_state;
+ payload->start_slot = 0;
}
- mgr->payloads[i].payload_state = req_payload.payload_state;
+ payload->payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
- if (port)
- drm_dp_put_port(port);
+ if (put_port)
+ drm_dp_mst_topology_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
- if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
- DRM_DEBUG_KMS("removing payload %d\n", i);
- for (j = i; j < mgr->max_payloads - 1; j++) {
- memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload));
- mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
- if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) {
- set_bit(j + 1, &mgr->payload_mask);
- } else {
- clear_bit(j + 1, &mgr->payload_mask);
- }
- }
- memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload));
- mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
- clear_bit(mgr->max_payloads, &mgr->payload_mask);
+ if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL)
+ continue;
+ DRM_DEBUG_KMS("removing payload %d\n", i);
+ for (j = i; j < mgr->max_payloads - 1; j++) {
+ mgr->payloads[j] = mgr->payloads[j + 1];
+ mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
+
+ if (mgr->proposed_vcpis[j] &&
+ mgr->proposed_vcpis[j]->num_slots) {
+ set_bit(j + 1, &mgr->payload_mask);
+ } else {
+ clear_bit(j + 1, &mgr->payload_mask);
+ }
}
+
+ memset(&mgr->payloads[mgr->max_payloads - 1], 0,
+ sizeof(struct drm_dp_payload));
+ mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL;
+ clear_bit(mgr->max_payloads, &mgr->payload_mask);
}
mutex_unlock(&mgr->payload_lock);
@@ -2012,7 +2466,7 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb)
return -EINVAL;
@@ -2029,14 +2483,14 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
- if (txmsg->reply.reply_type == 1) {
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
ret = -EINVAL;
- } else
+ else
ret = 0;
}
kfree(txmsg);
fail_put:
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
@@ -2044,7 +2498,7 @@ static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req
{
struct drm_dp_sideband_msg_reply_body reply;
- reply.reply_type = 0;
+ reply.reply_type = DP_SIDEBAND_REPLY_ACK;
reply.req_type = req_type;
drm_dp_encode_sideband_reply(&reply, msg);
return 0;
@@ -2146,7 +2600,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* give this the main reference */
mgr->mst_primary = mstb;
- kref_get(&mgr->mst_primary->kref);
+ drm_dp_mst_topology_get_mstb(mgr->mst_primary);
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
@@ -2180,7 +2634,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
out_unlock:
mutex_unlock(&mgr->lock);
if (mstb)
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}
@@ -2345,18 +2799,23 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
mgr->down_rep_recv.initial_hdr.lct,
mgr->down_rep_recv.initial_hdr.rad[0],
mgr->down_rep_recv.msg[0]);
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
return 0;
}
drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
- if (txmsg->reply.reply_type == 1) {
- DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data);
- }
+
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
+ txmsg->reply.req_type,
+ drm_dp_mst_req_type_str(txmsg->reply.req_type),
+ txmsg->reply.u.nak.reason,
+ drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
+ txmsg->reply.u.nak.nak_data);
memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
@@ -2412,7 +2871,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dp_update_port(mstb, &msg.u.conn_stat);
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
- (*mgr->cbs->hotplug)(mgr);
+ drm_kms_helper_hotplug_event(mgr->dev);
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
@@ -2429,7 +2888,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
}
if (mstb)
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
}
@@ -2488,8 +2947,8 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
{
enum drm_connector_status status = connector_status_disconnected;
- /* we need to search for the port in the mgr in case its gone */
- port = drm_dp_get_validated_port_ref(mgr, port);
+ /* we need to search for the port in the mgr in case it's gone */
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return connector_status_disconnected;
@@ -2514,7 +2973,7 @@ enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector
break;
}
out:
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return status;
}
EXPORT_SYMBOL(drm_dp_mst_detect_port);
@@ -2531,11 +2990,11 @@ bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
{
bool ret = false;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return ret;
ret = port->has_audio;
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return ret;
}
EXPORT_SYMBOL(drm_dp_mst_port_has_audio);
@@ -2554,8 +3013,8 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
{
struct edid *edid = NULL;
- /* we need to search for the port in the mgr in case its gone */
- port = drm_dp_get_validated_port_ref(mgr, port);
+ /* we need to search for the port in the mgr in case it's gone */
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return NULL;
@@ -2566,7 +3025,7 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
drm_connector_set_tile_property(connector);
}
port->has_audio = drm_detect_monitor_audio(edid);
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return edid;
}
EXPORT_SYMBOL(drm_dp_mst_get_edid);
@@ -2617,43 +3076,90 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr,
}
/**
- * drm_dp_atomic_find_vcpi_slots() - Find and add vcpi slots to the state
+ * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state
* @state: global atomic state
* @mgr: MST topology manager for the port
* @port: port to find vcpi slots for
* @pbn: bandwidth required for the mode in PBN
*
- * RETURNS:
- * Total slots in the atomic state assigned for this port or error
+ * Allocates VCPI slots to @port, replacing any previous VCPI allocations it
+ * may have had. Any atomic drivers which support MST must call this function
+ * in their &drm_encoder_helper_funcs.atomic_check() callback to change the
+ * current VCPI allocation for the new state, but only when
+ * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set
+ * to ensure compatibility with userspace applications that still use the
+ * legacy modesetting UAPI.
+ *
+ * Allocations set by this function are not checked against the bandwidth
+ * constraints of @mgr until the driver calls drm_dp_mst_atomic_check().
+ *
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK however, to call this function and
+ * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase.
+ *
+ * See also:
+ * drm_dp_atomic_release_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * Total slots in the atomic state assigned for this port, or a negative error
+ * code if the port no longer exists
*/
int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, int pbn)
{
struct drm_dp_mst_topology_state *topology_state;
- int req_slots;
+ struct drm_dp_vcpi_allocation *pos, *vcpi = NULL;
+ int prev_slots, req_slots, ret;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (port == NULL)
- return -EINVAL;
- req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
- DRM_DEBUG_KMS("vcpi slots req=%d, avail=%d\n",
- req_slots, topology_state->avail_slots);
+ /* Find the current allocation for this port, if any */
+ list_for_each_entry(pos, &topology_state->vcpis, next) {
+ if (pos->port == port) {
+ vcpi = pos;
+ prev_slots = vcpi->vcpi;
+
+ /*
+ * This should never happen, unless the driver tries
+ * releasing and allocating the same VCPI allocation,
+ * which is an error
+ */
+ if (WARN_ON(!prev_slots)) {
+ DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
+ port);
+ return -EINVAL;
+ }
- if (req_slots > topology_state->avail_slots) {
- drm_dp_put_port(port);
- return -ENOSPC;
+ break;
+ }
}
+ if (!vcpi)
+ prev_slots = 0;
- topology_state->avail_slots -= req_slots;
- DRM_DEBUG_KMS("vcpi slots avail=%d", topology_state->avail_slots);
+ req_slots = DIV_ROUND_UP(pbn, mgr->pbn_div);
+
+ DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
+ port->connector->base.id, port->connector->name,
+ port, prev_slots, req_slots);
+
+ /* Add the new allocation to the state */
+ if (!vcpi) {
+ vcpi = kzalloc(sizeof(*vcpi), GFP_KERNEL);
+ if (!vcpi)
+ return -ENOMEM;
+
+ drm_dp_mst_get_port_malloc(port);
+ vcpi->port = port;
+ list_add(&vcpi->next, &topology_state->vcpis);
+ }
+ vcpi->vcpi = req_slots;
- drm_dp_put_port(port);
- return req_slots;
+ ret = req_slots;
+ return ret;
}
EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
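For illustration, a hedged sketch of the atomic_check flow this kerneldoc describes. The foo_* names, the mgr/port fields, and the fixed 24 bpp are assumptions; drm_dp_atomic_find_vcpi_slots() and drm_dp_calc_pbn_mode() are the real helpers:

static int foo_mst_encoder_atomic_check(struct drm_encoder *encoder,
					struct drm_crtc_state *crtc_state,
					struct drm_connector_state *conn_state)
{
	struct foo_mst_connector *conn =
		to_foo_mst_connector(conn_state->connector); /* hypothetical */
	int pbn, slots;

	/* only touch VCPI when the legacy-UAPI-visible state changed */
	if (!crtc_state->mode_changed && !crtc_state->connectors_changed)
		return 0;

	pbn = drm_dp_calc_pbn_mode(crtc_state->adjusted_mode.clock, 24);
	slots = drm_dp_atomic_find_vcpi_slots(crtc_state->state, conn->mgr,
					      conn->port, pbn);
	if (slots < 0)
		return slots;

	/* the driver would stash 'slots' for the commit phase here */
	return 0;
}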
@@ -2661,31 +3167,57 @@ EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots);
* drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots
* @state: global atomic state
* @mgr: MST topology manager for the port
- * @slots: number of vcpi slots to release
+ * @port: The port to release the VCPI slots from
*
- * RETURNS:
- * 0 if @slots were added back to &drm_dp_mst_topology_state->avail_slots or
- * negative error code
+ * Releases any VCPI slots that have been allocated to a port in the atomic
+ * state. Any atomic drivers which support MST must call this function in
+ * their &drm_connector_helper_funcs.atomic_check() callback when the
+ * connector will no longer have VCPI allocated (e.g. because its CRTC was
+ * removed) when it had VCPI allocated in the previous atomic state.
+ *
+ * It is OK to call this even if @port has been removed from the system.
+ * Additionally, it is OK to call this function multiple times on the same
+ * @port as needed. It is not OK, however, to call this function and
+ * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check
+ * phase.
+ *
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_mst_atomic_check()
+ *
+ * Returns:
+ * 0 if all slots for this port were added back to
+ * &drm_dp_mst_topology_state.avail_slots or negative error code
*/
int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- int slots)
+ struct drm_dp_mst_port *port)
{
struct drm_dp_mst_topology_state *topology_state;
+ struct drm_dp_vcpi_allocation *pos;
+ bool found = false;
topology_state = drm_atomic_get_mst_topology_state(state, mgr);
if (IS_ERR(topology_state))
return PTR_ERR(topology_state);
- /* We cannot rely on port->vcpi.num_slots to update
- * topology_state->avail_slots as the port may not exist if the parent
- * branch device was unplugged. This should be fixed by tracking
- * per-port slot allocation in drm_dp_mst_topology_state instead of
- * depending on the caller to tell us how many slots to release.
- */
- topology_state->avail_slots += slots;
- DRM_DEBUG_KMS("vcpi slots released=%d, avail=%d\n",
- slots, topology_state->avail_slots);
+ list_for_each_entry(pos, &topology_state->vcpis, next) {
+ if (pos->port == port) {
+ found = true;
+ break;
+ }
+ }
+ if (WARN_ON(!found)) {
+ DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
+ port, &topology_state->base);
+ return -EINVAL;
+ }
+
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
+ if (pos->vcpi) {
+ drm_dp_mst_put_port_malloc(port);
+ pos->vcpi = 0;
+ }
return 0;
}
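And the matching release side, again as a hedged sketch (the foo_* names and connector fields are assumptions; the helper calls are real):

static int foo_mst_connector_atomic_check(struct drm_connector *connector,
					  struct drm_connector_state *new_state)
{
	struct drm_atomic_state *state = new_state->state;
	struct foo_mst_connector *conn = to_foo_mst_connector(connector);
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, connector);

	/* the CRTC was taken away from this connector: return its VCPI */
	if (old_state->crtc && !new_state->crtc)
		return drm_dp_atomic_release_vcpi_slots(state, conn->mgr,
							conn->port);

	return 0;
}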
@@ -2703,7 +3235,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
{
int ret;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return false;
@@ -2711,9 +3243,10 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
return false;
if (port->vcpi.vcpi > 0) {
- DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
+ port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return true;
}
}
@@ -2721,13 +3254,15 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
if (ret) {
DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
- DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+ DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
goto out;
}
DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
- pbn, port->vcpi.num_slots);
+ pbn, port->vcpi.num_slots);
- drm_dp_put_port(port);
+ /* Keep port allocated until its payload has been removed */
+ drm_dp_mst_get_port_malloc(port);
+ drm_dp_mst_topology_put_port(port);
return true;
out:
return false;
@@ -2737,12 +3272,12 @@ EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi);
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
int slots = 0;
- port = drm_dp_get_validated_port_ref(mgr, port);
+ port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
return slots;
slots = port->vcpi.num_slots;
- drm_dp_put_port(port);
+ drm_dp_mst_topology_put_port(port);
return slots;
}
EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
@@ -2756,23 +3291,27 @@ EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots);
*/
void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
{
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port)
- return;
+ /*
+ * A port with VCPI will remain allocated until its VCPI is
+ * released, no verified ref needed
+ */
+
port->vcpi.num_slots = 0;
- drm_dp_put_port(port);
}
EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots);
/**
* drm_dp_mst_deallocate_vcpi() - deallocate a VCPI
* @mgr: manager for this port
- * @port: unverified port to deallocate vcpi for
+ * @port: port to deallocate vcpi for
+ *
+ * This can be called unconditionally, regardless of whether
+ * drm_dp_mst_allocate_vcpi() succeeded or not.
*/
-void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port)
+void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port)
{
- port = drm_dp_get_validated_port_ref(mgr, port);
- if (!port)
+ if (!port->vcpi.vcpi)
return;
drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
@@ -2780,7 +3319,7 @@ void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_
port->vcpi.pbn = 0;
port->vcpi.aligned_pbn = 0;
port->vcpi.vcpi = 0;
- drm_dp_put_port(port);
+ drm_dp_mst_put_port_malloc(port);
}
EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi);
@@ -3064,13 +3603,6 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
-static void drm_dp_free_mst_port(struct kref *kref)
-{
- struct drm_dp_mst_port *port = container_of(kref, struct drm_dp_mst_port, kref);
- kref_put(&port->parent->kref, drm_dp_free_mst_branch_device);
- kfree(port);
-}
-
static void drm_dp_destroy_connector_work(struct work_struct *work)
{
struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_connector_work);
@@ -3091,7 +3623,6 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
list_del(&port->next);
mutex_unlock(&mgr->destroy_connector_lock);
- kref_init(&port->kref);
INIT_LIST_HEAD(&port->next);
mgr->cbs->destroy_connector(mgr, port->connector);
@@ -3099,31 +3630,51 @@ static void drm_dp_destroy_connector_work(struct work_struct *work)
drm_dp_port_teardown_pdt(port, port->pdt);
port->pdt = DP_PEER_DEVICE_NONE;
- if (!port->input && port->vcpi.vcpi > 0) {
- drm_dp_mst_reset_vcpi_slots(mgr, port);
- drm_dp_update_payload_part1(mgr);
- drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi);
- }
-
- kref_put(&port->kref, drm_dp_free_mst_port);
+ drm_dp_mst_put_port_malloc(port);
send_hotplug = true;
}
if (send_hotplug)
- (*mgr->cbs->hotplug)(mgr);
+ drm_kms_helper_hotplug_event(mgr->dev);
}
static struct drm_private_state *
drm_dp_mst_duplicate_state(struct drm_private_obj *obj)
{
- struct drm_dp_mst_topology_state *state;
+ struct drm_dp_mst_topology_state *state, *old_state =
+ to_dp_mst_topology_state(obj->state);
+ struct drm_dp_vcpi_allocation *pos, *vcpi;
- state = kmemdup(obj->state, sizeof(*state), GFP_KERNEL);
+ state = kmemdup(old_state, sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
__drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
+ INIT_LIST_HEAD(&state->vcpis);
+
+ list_for_each_entry(pos, &old_state->vcpis, next) {
+ /* Prune leftover freed VCPI allocations */
+ if (!pos->vcpi)
+ continue;
+
+ vcpi = kmemdup(pos, sizeof(*vcpi), GFP_KERNEL);
+ if (!vcpi)
+ goto fail;
+
+ drm_dp_mst_get_port_malloc(vcpi->port);
+ list_add(&vcpi->next, &state->vcpis);
+ }
+
return &state->base;
+
+fail:
+ list_for_each_entry_safe(pos, vcpi, &state->vcpis, next) {
+ drm_dp_mst_put_port_malloc(pos->port);
+ kfree(pos);
+ }
+ kfree(state);
+
+ return NULL;
}
static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
@@ -3131,14 +3682,99 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj,
{
struct drm_dp_mst_topology_state *mst_state =
to_dp_mst_topology_state(state);
+ struct drm_dp_vcpi_allocation *pos, *tmp;
+
+ list_for_each_entry_safe(pos, tmp, &mst_state->vcpis, next) {
+ /* We only keep references to ports with non-zero VCPIs */
+ if (pos->vcpi)
+ drm_dp_mst_put_port_malloc(pos->port);
+ kfree(pos);
+ }
kfree(mst_state);
}
-static const struct drm_private_state_funcs mst_state_funcs = {
+static inline int
+drm_dp_mst_atomic_check_topology_state(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_topology_state *mst_state)
+{
+ struct drm_dp_vcpi_allocation *vcpi;
+ int avail_slots = 63, payload_count = 0;
+
+ list_for_each_entry(vcpi, &mst_state->vcpis, next) {
+ /* Releasing VCPI is always OK, even if the port is gone */
+ if (!vcpi->vcpi) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
+ vcpi->port);
+ continue;
+ }
+
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
+ vcpi->port, vcpi->vcpi);
+
+ avail_slots -= vcpi->vcpi;
+ if (avail_slots < 0) {
+ DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
+ vcpi->port, mst_state,
+ avail_slots + vcpi->vcpi);
+ return -ENOSPC;
+ }
+
+ if (++payload_count > mgr->max_payloads) {
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
+ mgr, mst_state, mgr->max_payloads);
+ return -EINVAL;
+ }
+ }
+ DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
+ mgr, mst_state, avail_slots,
+ 63 - avail_slots);
+
+ return 0;
+}
+
+/**
+ * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an
+ * atomic update is valid
+ * @state: Pointer to the new &struct drm_dp_mst_topology_state
+ *
+ * Checks the given topology state for an atomic update to ensure that it's
+ * valid. This includes checking whether there's enough bandwidth to support
+ * the new VCPI allocations in the atomic update.
+ *
+ * Any atomic drivers supporting DP MST must make sure to call this after
+ * checking the rest of their state in their
+ * &drm_mode_config_funcs.atomic_check() callback.
+ *
+ * See also:
+ * drm_dp_atomic_find_vcpi_slots()
+ * drm_dp_atomic_release_vcpi_slots()
+ *
+ * Returns:
+ *
+ * 0 if the new state is valid, negative error code otherwise.
+ */
+int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
+{
+ struct drm_dp_mst_topology_mgr *mgr;
+ struct drm_dp_mst_topology_state *mst_state;
+ int i, ret = 0;
+
+ for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) {
+ ret = drm_dp_mst_atomic_check_topology_state(mgr, mst_state);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_dp_mst_atomic_check);
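A hedged sketch of where this call sits in a driver; foo_atomic_check() is hypothetical, while drm_atomic_helper_check() is the stock helper:

static int foo_atomic_check(struct drm_device *dev,
			    struct drm_atomic_state *state)
{
	int ret;

	ret = drm_atomic_helper_check(dev, state);
	if (ret)
		return ret;

	/* validate VCPI allocations across every MST manager in the state */
	return drm_dp_mst_atomic_check(state);
}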
+
+const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs = {
.atomic_duplicate_state = drm_dp_mst_duplicate_state,
.atomic_destroy_state = drm_dp_mst_destroy_state,
};
+EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs);
/**
* drm_atomic_get_mst_topology_state: get MST topology state
@@ -3216,13 +3852,11 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
mst_state->mgr = mgr;
+ INIT_LIST_HEAD(&mst_state->vcpis);
- /* max. time slots - one slot for MTP header */
- mst_state->avail_slots = 63;
-
- drm_atomic_private_obj_init(&mgr->base,
+ drm_atomic_private_obj_init(dev, &mgr->base,
&mst_state->base,
- &mst_state_funcs);
+ &drm_dp_mst_topology_state_funcs);
return 0;
}
@@ -3234,6 +3868,7 @@ EXPORT_SYMBOL(drm_dp_mst_topology_mgr_init);
*/
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
{
+ drm_dp_mst_topology_mgr_set_mst(mgr, false);
flush_work(&mgr->work);
flush_work(&mgr->destroy_connector_work);
mutex_lock(&mgr->payload_lock);
@@ -3249,6 +3884,23 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr)
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_destroy);
+static bool remote_i2c_read_ok(const struct i2c_msg msgs[], int num)
+{
+ int i;
+
+ if (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)
+ return false;
+
+ for (i = 0; i < num - 1; i++) {
+ if (msgs[i].flags & I2C_M_RD ||
+ msgs[i].len > 0xff)
+ return false;
+ }
+
+ return msgs[num - 1].flags & I2C_M_RD &&
+ msgs[num - 1].len <= 0xff;
+}
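In other words, the only transfer shape accepted is zero or more short writes followed by exactly one read. A hedged example of a qualifying transfer - the classic register-addressed read used for EDID; the address and lengths are illustrative:

u8 offset = 0x00;
u8 buf[16];
struct i2c_msg msgs[] = {
	/* write: set the register offset; <= 0xff bytes, no I2C_M_RD */
	{ .addr = 0x50, .flags = 0, .len = 1, .buf = &offset },
	/* single trailing read; <= 0xff bytes */
	{ .addr = 0x50, .flags = I2C_M_RD, .len = sizeof(buf), .buf = buf },
};

/* remote_i2c_read_ok(msgs, ARRAY_SIZE(msgs)) returns true here */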
+
/* I2C device */
static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
int num)
@@ -3258,21 +3910,15 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
struct drm_dp_mst_branch *mstb;
struct drm_dp_mst_topology_mgr *mgr = port->mgr;
unsigned int i;
- bool reading = false;
struct drm_dp_sideband_msg_req_body msg;
struct drm_dp_sideband_msg_tx *txmsg = NULL;
int ret;
- mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ mstb = drm_dp_mst_topology_get_mstb_validated(mgr, port->parent);
if (!mstb)
return -EREMOTEIO;
- /* construct i2c msg */
- /* see if last msg is a read */
- if (msgs[num - 1].flags & I2C_M_RD)
- reading = true;
-
- if (!reading || (num - 1 > DP_REMOTE_I2C_READ_MAX_TRANSACTIONS)) {
+ if (!remote_i2c_read_ok(msgs, num)) {
DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
ret = -EIO;
goto out;
@@ -3286,6 +3932,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
msg.u.i2c_read.transactions[i].i2c_dev_id = msgs[i].addr;
msg.u.i2c_read.transactions[i].num_bytes = msgs[i].len;
msg.u.i2c_read.transactions[i].bytes = msgs[i].buf;
+ msg.u.i2c_read.transactions[i].no_stop_bit = !(msgs[i].flags & I2C_M_STOP);
}
msg.u.i2c_read.read_i2c_device_id = msgs[num - 1].addr;
msg.u.i2c_read.num_bytes_read = msgs[num - 1].len;
@@ -3304,7 +3951,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0) {
- if (txmsg->reply.reply_type == 1) { /* got a NAK back */
+ if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
ret = -EREMOTEIO;
goto out;
}
@@ -3317,7 +3964,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs
}
out:
kfree(txmsg);
- drm_dp_put_mst_branch_device(mstb);
+ drm_dp_mst_topology_put_mstb(mstb);
return ret;
}