path: root/include/uapi/linux/dma-buf.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2015 Intel Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef _DMA_BUF_UAPI_H_
#define _DMA_BUF_UAPI_H_

#include <linux/types.h>

/**
 * struct dma_buf_sync - Synchronize with CPU access.
 *
 * When a DMA buffer is accessed from the CPU via mmap, it is not always
 * possible to guarantee coherency between the CPU-visible map and underlying
 * memory.  To manage coherency, DMA_BUF_IOCTL_SYNC must be used to bracket
 * any CPU access to give the kernel the chance to shuffle memory around if
 * needed.
 *
 * Prior to accessing the map, the client must call DMA_BUF_IOCTL_SYNC
 * with DMA_BUF_SYNC_START and the appropriate read/write flags.  Once the
 * access is complete, the client should call DMA_BUF_IOCTL_SYNC with
 * DMA_BUF_SYNC_END and the same read/write flags.
 *
 * The synchronization provided via DMA_BUF_IOCTL_SYNC only provides cache
 * coherency.  It does not prevent other processes or devices from
 * accessing the memory at the same time.  If synchronization with a GPU or
 * other device driver is required, it is the client's responsibility to
 * wait for the buffer to be ready for reading or writing before calling this
 * ioctl with DMA_BUF_SYNC_START.  Likewise, the client must ensure that
 * follow-up work is not submitted to the GPU or other device driver until
 * after this ioctl has been called with DMA_BUF_SYNC_END.
 *
 * If the driver or API with which the client is interacting uses implicit
 * synchronization, waiting for prior work to complete can be done via
 * poll() on the DMA buffer file descriptor.  If the driver or API requires
 * explicit synchronization, the client may have to wait on a sync_file or
 * other synchronization primitive outside the scope of the DMA buffer API.
 */
struct dma_buf_sync {
	/**
	 * @flags: Set of access flags
	 *
	 * DMA_BUF_SYNC_START:
	 *     Indicates the start of a map access session.
	 *
	 * DMA_BUF_SYNC_END:
	 *     Indicates the end of a map access session.
	 *
	 * DMA_BUF_SYNC_READ:
	 *     Indicates that the mapped DMA buffer will be read by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_WRITE:
	 *     Indicates that the mapped DMA buffer will be written by the
	 *     client via the CPU map.
	 *
	 * DMA_BUF_SYNC_RW:
	 *     An alias for DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE.
	 */
	__u64 flags;
};

#define DMA_BUF_SYNC_READ      (1 << 0)
#define DMA_BUF_SYNC_WRITE     (2 << 0)
#define DMA_BUF_SYNC_RW        (DMA_BUF_SYNC_READ | DMA_BUF_SYNC_WRITE)
#define DMA_BUF_SYNC_START     (0 << 2)
#define DMA_BUF_SYNC_END       (1 << 2)
#define DMA_BUF_SYNC_VALID_FLAGS_MASK \
	(DMA_BUF_SYNC_RW | DMA_BUF_SYNC_END)
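
/*
 * Example: bracketing CPU access to an mmap()ed dma-buf with
 * DMA_BUF_IOCTL_SYNC.  This is a minimal userspace sketch; error handling
 * is omitted, and the dma-buf fd, its size and the access direction are
 * assumed to come from the caller.
 *
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/dma-buf.h>
 *
 *	static void fill_buffer(int dmabuf_fd, size_t size)
 *	{
 *		struct dma_buf_sync sync = { 0 };
 *		void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, dmabuf_fd, 0);
 *
 *		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *		memset(map, 0, size);		// CPU access happens here
 *
 *		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *
 *		munmap(map, size);
 *	}
 */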

#define DMA_BUF_NAME_LEN	32

/**
 * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf
 *
 * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the
 * current set of fences on a dma-buf file descriptor as a sync_file.  CPU
 * waits via poll() or other driver-specific mechanisms typically wait on
 * whatever fences are on the dma-buf at the time the wait begins.  This
 * is similar except that it takes a snapshot of the current fences on the
 * dma-buf for waiting later instead of waiting immediately.  This is
 * useful for modern graphics APIs such as Vulkan which assume an explicit
 * synchronization model but still need to interoperate with dma-buf.
 *
 * The intended usage pattern is the following:
 *
 *  1. Export a sync_file with flags corresponding to the expected GPU usage
 *     via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
 *
 *  2. Submit rendering work which uses the dma-buf.  The work should wait on
 *     the exported sync file before rendering and produce another sync_file
 *     when complete.
 *
 *  3. Import the rendering-complete sync_file into the dma-buf with flags
 *     corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
 *
 * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
 * the above is not a single atomic operation.  If userspace wants to ensure
 * ordering via these fences, it is the responsibility of userspace to use
 * locks or other mechanisms to ensure that no other context adds fences or
 * submits work between steps 1 and 3 above.
 */
struct dma_buf_export_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * the returned sync file waits on any writers of the dma-buf to
	 * complete.  Waiting on the returned sync file is equivalent to
	 * poll() with POLLIN.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
	 * any users of the dma-buf (read or write) to complete.  Waiting
	 * on the returned sync file is equivalent to poll() with POLLOUT.
	 * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
	 * is equivalent to just DMA_BUF_SYNC_WRITE.
	 */
	__u32 flags;
	/** @fd: Returned sync file descriptor */
	__s32 fd;
};
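
/*
 * Example: step 1 of the pattern above, exporting the dma-buf's current
 * fences as a sync_file before submitting explicitly synchronized work.
 * A minimal userspace sketch; how the returned fd is handed to the GPU
 * submission (e.g. as a Vulkan semaphore import) is outside the scope of
 * this header.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	static int export_wait_fence(int dmabuf_fd)
 *	{
 *		struct dma_buf_export_sync_file args = {
 *			.flags = DMA_BUF_SYNC_WRITE,	// the GPU will write the buffer
 *			.fd = -1,
 *		};
 *
 *		if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &args))
 *			return -1;
 *
 *		return args.fd;		// sync_file fd, close() when no longer needed
 *	}
 */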

/**
 * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
 *
 * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
 * sync_file into a dma-buf for the purposes of implicit synchronization
 * with other dma-buf consumers.  This allows clients using explicitly
 * synchronized APIs such as Vulkan to interoperate with dma-buf consumers
 * which expect implicit synchronization such as OpenGL or most media
 * drivers/video.
 */
struct dma_buf_import_sync_file {
	/**
	 * @flags: Read/write flags
	 *
	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
	 *
	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
	 * this inserts the sync_file as a read-only fence.  Any subsequent
	 * implicitly synchronized writes to this dma-buf will wait on this
	 * fence but reads will not.
	 *
	 * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
	 * write fence.  All subsequent implicitly synchronized access to
	 * this dma-buf will wait on this fence.
	 */
	__u32 flags;
	/** @fd: Sync file descriptor */
	__s32 fd;
};
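
/*
 * Example: step 3 of the pattern described above for
 * DMA_BUF_IOCTL_EXPORT_SYNC_FILE, importing the rendering-complete
 * sync_file back into the dma-buf so that implicitly synchronized
 * consumers wait for the explicit work.  A minimal userspace sketch;
 * sync_fd is assumed to be the fence produced by the work submission.
 *
 *	#include <sys/ioctl.h>
 *	#include <unistd.h>
 *	#include <linux/dma-buf.h>
 *
 *	static int attach_render_fence(int dmabuf_fd, int sync_fd)
 *	{
 *		struct dma_buf_import_sync_file args = {
 *			.flags = DMA_BUF_SYNC_WRITE,	// the work wrote the buffer
 *			.fd = sync_fd,
 *		};
 *		int ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &args);
 *
 *		close(sync_fd);		// the kernel holds its own reference
 *		return ret;
 *	}
 */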

#define DMA_BUF_BASE		'b'
#define DMA_BUF_IOCTL_SYNC	_IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)

/*
 * The 32/64-bitness of this uapi was botched in Android: there is no
 * difference between the variants in the actual uapi, they are just
 * different ioctl numbers.
 */
#define DMA_BUF_SET_NAME	_IOW(DMA_BUF_BASE, 1, const char *)
#define DMA_BUF_SET_NAME_A	_IOW(DMA_BUF_BASE, 1, __u32)
#define DMA_BUF_SET_NAME_B	_IOW(DMA_BUF_BASE, 1, __u64)
#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE	_IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE	_IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
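
/*
 * Example: naming a dma-buf for debug accounting.  A minimal userspace
 * sketch; the pointer-sized DMA_BUF_SET_NAME variant matching the build
 * is used, and the name is assumed to be at most DMA_BUF_NAME_LEN bytes.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	static int name_buffer(int dmabuf_fd, const char *name)
 *	{
 *		return ioctl(dmabuf_fd, DMA_BUF_SET_NAME, name);
 *	}
 */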

#endif