// SPDX-License-Identifier: GPL-2.0-only
/*
 * Syncpoint dma_fence implementation
 *
 * Copyright (c) 2020, NVIDIA Corporation.
 */

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sync_file.h>
#include <linux/workqueue.h>

#include "fence.h"
#include "intr.h"
#include "syncpt.h"

static const char *host1x_syncpt_fence_get_driver_name(struct dma_fence *f)
{
	return "host1x";
}

static const char *host1x_syncpt_fence_get_timeline_name(struct dma_fence *f)
{
	return "syncpoint";
}

static struct host1x_syncpt_fence *to_host1x_fence(struct dma_fence *f)
{
	return container_of(f, struct host1x_syncpt_fence, base);
}

static bool host1x_syncpt_fence_enable_signaling(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	if (host1x_syncpt_is_expired(sf->sp, sf->threshold))
		return false;

	/* Reference for interrupt path. */
	dma_fence_get(f);

	/*
	 * The dma_fence framework requires the fence driver to keep a
	 * reference to any fences for which 'enable_signaling' has been
	 * called (and that have not been signalled).
	 *
	 * We cannot currently always guarantee that all fences get signalled
	 * or cancelled. As such, for such situations, set up a timeout, so
	 * that long-lasting fences will get reaped eventually.
	 */
	if (sf->timeout) {
		/* Reference for timeout path. */
		dma_fence_get(f);
		schedule_delayed_work(&sf->timeout_work, msecs_to_jiffies(30000));
	}

	host1x_intr_add_fence_locked(sf->sp->host, sf);

	/*
	 * The fence may get signalled at any time after the call above, so
	 * all state used by the signalling path must be initialized before
	 * making that call.
	 */

	return true;
}

static const struct dma_fence_ops host1x_syncpt_fence_ops = {
	.get_driver_name = host1x_syncpt_fence_get_driver_name,
	.get_timeline_name = host1x_syncpt_fence_get_timeline_name,
	.enable_signaling = host1x_syncpt_fence_enable_signaling,
};
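
/*
 * Reference lifecycle, informally: enable_signaling takes one reference for
 * the interrupt path and, if a timeout was requested, a second one for the
 * timeout path. host1x_fence_signal() and do_fence_timeout() race via the
 * atomic_xchg() on ->signaling: the winner signals the fence and drops its
 * own path's reference, plus the other path's reference if it can prove
 * that path will no longer run (the delayed work was cancelled, or the
 * fence was removed from the interrupt queue). The loser only drops its
 * own path's reference and returns.
 */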

/*
 * Signal the fence from the syncpoint interrupt path. The caller must hold
 * the fence's lock (the syncpoint's fences.lock), as required by
 * dma_fence_signal_locked() below.
 */
void host1x_fence_signal(struct host1x_syncpt_fence *f)
{
	if (atomic_xchg(&f->signaling, 1)) {
		/*
		 * The timeout path is already signalling this fence, but the
		 * interrupt path removed it from the queue first, so drop the
		 * interrupt path's reference here.
		 */
		dma_fence_put(&f->base);
		return;
	}

	if (f->timeout && cancel_delayed_work(&f->timeout_work)) {
		/*
		 * We know that the timeout path will not be entered.
		 * Safe to drop the timeout path's reference now.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_signal_locked(&f->base);

	/* Drop the interrupt path's own reference. */
	dma_fence_put(&f->base);
}

static void do_fence_timeout(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct host1x_syncpt_fence *f =
		container_of(dwork, struct host1x_syncpt_fence, timeout_work);

	if (atomic_xchg(&f->signaling, 1)) {
		/* Already on interrupt path, drop timeout path reference if any. */
		if (f->timeout)
			dma_fence_put(&f->base);
		return;
	}

	if (host1x_intr_remove_fence(f->sp->host, f)) {
		/*
		 * Managed to remove fence from queue, so it's safe to drop
		 * the interrupt path's reference.
		 */
		dma_fence_put(&f->base);
	}

	dma_fence_set_error(&f->base, -ETIMEDOUT);
	dma_fence_signal(&f->base);

	/* Drop the timeout path's own reference. */
	if (f->timeout)
		dma_fence_put(&f->base);
}

/*
 * Create a dma_fence that signals when syncpoint @sp reaches @threshold.
 * If @timeout is true, a fence whose signalling has been enabled is
 * force-signalled with -ETIMEDOUT after 30 seconds, so that it cannot be
 * leaked forever. Returns an ERR_PTR() on allocation failure.
 */
struct dma_fence *host1x_fence_create(struct host1x_syncpt *sp, u32 threshold,
				      bool timeout)
{
	struct host1x_syncpt_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);

	fence->sp = sp;
	fence->threshold = threshold;
	fence->timeout = timeout;

	/* Each fence lives on its own freshly allocated timeline context. */
	dma_fence_init(&fence->base, &host1x_syncpt_fence_ops, &sp->fences.lock,
		       dma_fence_context_alloc(1), 0);

	INIT_DELAYED_WORK(&fence->timeout_work, do_fence_timeout);

	return &fence->base;
}
EXPORT_SYMBOL(host1x_fence_create);
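
/*
 * Illustrative sketch, not part of this driver: one way a caller might use
 * host1x_fence_create() to wait for a syncpoint threshold. The helper name
 * host1x_example_wait() and the one-second wait are hypothetical; only
 * host1x_fence_create() above is real API. Kept under #if 0 so it is not
 * built.
 */
#if 0
static int host1x_example_wait(struct host1x_syncpt *sp, u32 threshold)
{
	struct dma_fence *fence;
	long ret;

	/* The fence signals once the syncpoint value reaches the threshold. */
	fence = host1x_fence_create(sp, threshold, true);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* Interruptible wait, bounded to one second. */
	ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
	dma_fence_put(fence);

	if (ret < 0)		/* e.g. -ERESTARTSYS */
		return ret;
	if (ret == 0)		/* wait timed out */
		return -ETIMEDOUT;
	return 0;
}
#endif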

/*
 * Cancel a fence that may otherwise never signal: run the timeout path
 * immediately and wait for it to finish, so that the fence is signalled
 * with -ETIMEDOUT and the remaining references are dropped.
 */
void host1x_fence_cancel(struct dma_fence *f)
{
	struct host1x_syncpt_fence *sf = to_host1x_fence(f);

	schedule_delayed_work(&sf->timeout_work, 0);
	flush_delayed_work(&sf->timeout_work);
}
EXPORT_SYMBOL(host1x_fence_cancel);