author    Thierry Reding <treding@nvidia.com>  2019-12-02 15:29:03 +0100
committer Thierry Reding <treding@nvidia.com>  2020-01-10 16:37:43 +0100
commit    fd67e9c6ed5af223af0daee093593abe3dbb53d4 (patch)
tree      b6fa196007ee3b62453fdb5481679ed18185023e /drivers/gpu/host1x
parent    gpu: host1x: Rename "parent" to "host" (diff)
drm/tegra: Do not implement runtime PM
The Tegra DRM driver heavily relies on the implementations for runtime
suspend/resume to be called at specific times. Unfortunately, there are
some cases where that doesn't work. One example is if the user disables
runtime PM for a given subdevice. Another example is that the PM core
acquires a reference to runtime PM during system sleep, effectively
preventing devices from going into low power modes. This is intentional
to avoid nasty race conditions, but it also causes system sleep to not
function properly on all Tegra systems.

Fix this by not implementing runtime PM at all. Instead, a minimal,
reference-counted suspend/resume infrastructure is added to the host1x
bus. This has the benefit that it can be used regardless of the system
power state (or any transitions we might be in), or whether or not the
user allows runtime PM.

Atomic modesetting guarantees that these functions will end up being
called at the right point in time, so the pitfalls for the more generic
runtime PM do not apply here.

Signed-off-by: Thierry Reding <treding@nvidia.com>
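As a usage sketch of the new interface (the output structure and hooks
below are hypothetical and for illustration only; host1x_client_resume(),
host1x_client_suspend() and struct host1x_client are what this patch
provides), a client driver pairs the two calls around hardware access
where it previously used pm_runtime_get_sync()/pm_runtime_put():

#include <linux/host1x.h>

/* Hypothetical client driver state; only the embedded struct
 * host1x_client comes from this patch, the rest is illustrative. */
struct example_output {
	struct host1x_client client;
	/* ... clocks, resets, register mappings ... */
};

static void example_output_enable(struct example_output *output)
{
	int err;

	/* Takes a reference; the 0 -> 1 transition resumes the parent
	 * chain first and then invokes this client's ->resume(). */
	err = host1x_client_resume(&output->client);
	if (err < 0) {
		dev_err(output->client.dev, "failed to resume: %d\n", err);
		return;
	}

	/* ... program the hardware ... */
}

static void example_output_disable(struct example_output *output)
{
	/* ... disable the hardware ... */

	/* Drops the reference; the 1 -> 0 transition invokes this
	 * client's ->suspend() and then releases the parent. */
	host1x_client_suspend(&output->client);
}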
Diffstat (limited to 'drivers/gpu/host1x')
-rw-r--r--  drivers/gpu/host1x/bus.c | 75
1 file changed, 75 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/host1x/bus.c b/drivers/gpu/host1x/bus.c
index 50d500345d04..6a995db51d6d 100644
--- a/drivers/gpu/host1x/bus.c
+++ b/drivers/gpu/host1x/bus.c
@@ -710,6 +710,10 @@ int host1x_client_register(struct host1x_client *client)
 	struct host1x *host1x;
 	int err;
 
+	INIT_LIST_HEAD(&client->list);
+	mutex_init(&client->lock);
+	client->usecount = 0;
+
 	mutex_lock(&devices_lock);
 
 	list_for_each_entry(host1x, &devices, list) {
@@ -768,3 +772,74 @@ int host1x_client_unregister(struct host1x_client *client)
 	return 0;
 }
 EXPORT_SYMBOL(host1x_client_unregister);
+
+int host1x_client_suspend(struct host1x_client *client)
+{
+	int err = 0;
+
+	mutex_lock(&client->lock);
+
+	if (client->usecount == 1) {
+		if (client->ops && client->ops->suspend) {
+			err = client->ops->suspend(client);
+			if (err < 0)
+				goto unlock;
+		}
+	}
+
+	client->usecount--;
+	dev_dbg(client->dev, "use count: %u\n", client->usecount);
+
+	if (client->parent) {
+		err = host1x_client_suspend(client->parent);
+		if (err < 0)
+			goto resume;
+	}
+
+	goto unlock;
+
+resume:
+	if (client->usecount == 0)
+		if (client->ops && client->ops->resume)
+			client->ops->resume(client);
+
+	client->usecount++;
+unlock:
+	mutex_unlock(&client->lock);
+	return err;
+}
+EXPORT_SYMBOL(host1x_client_suspend);
+
+int host1x_client_resume(struct host1x_client *client)
+{
+	int err = 0;
+
+	mutex_lock(&client->lock);
+
+	if (client->parent) {
+		err = host1x_client_resume(client->parent);
+		if (err < 0)
+			goto unlock;
+	}
+
+	if (client->usecount == 0) {
+		if (client->ops && client->ops->resume) {
+			err = client->ops->resume(client);
+			if (err < 0)
+				goto suspend;
+		}
+	}
+
+	client->usecount++;
+	dev_dbg(client->dev, "use count: %u\n", client->usecount);
+
+	goto unlock;
+
+suspend:
+	if (client->parent)
+		host1x_client_suspend(client->parent);
+unlock:
+	mutex_unlock(&client->lock);
+	return err;
+}
+EXPORT_SYMBOL(host1x_client_resume);
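
The ordering these two functions guarantee, i.e. a client's ->resume()
only runs once its parent chain is up, and a client's ->suspend() runs
before the parent chain is released, can be modeled outside the kernel.
The following standalone sketch (plain userspace C; the node names and
printf() calls stand in for the real ->suspend()/->resume() callbacks,
and locking and error handling are omitted) reproduces the reference
counting above:

#include <stdio.h>

struct node {
	const char *name;
	struct node *parent;
	unsigned int usecount;
};

static void node_resume(struct node *n)
{
	/* Resume the parent chain before the 0 -> 1 transition, as
	 * host1x_client_resume() does. */
	if (n->parent)
		node_resume(n->parent);

	if (n->usecount++ == 0)
		printf("%s: ->resume()\n", n->name);
}

static void node_suspend(struct node *n)
{
	/* Perform the 1 -> 0 transition before releasing the parent,
	 * as host1x_client_suspend() does. */
	if (--n->usecount == 0)
		printf("%s: ->suspend()\n", n->name);

	if (n->parent)
		node_suspend(n->parent);
}

int main(void)
{
	struct node host = { "host1x", NULL, 0 };
	struct node sor = { "sor", &host, 0 };
	struct node dc = { "dc", &host, 0 };

	node_resume(&sor);	/* prints: host1x, then sor */
	node_resume(&dc);	/* prints: dc (host1x already resumed) */
	node_suspend(&sor);	/* prints: sor (dc still holds host1x) */
	node_suspend(&dc);	/* prints: dc, then host1x */
	return 0;
}

The last user of a shared parent is the one that finally suspends it,
which is exactly the behavior the commit message relies on for atomic
modesetting.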