Diffstat (limited to 'include/uapi/linux')
-rw-r--r--   include/uapi/linux/dma-buf.h            84
-rw-r--r--   include/uapi/linux/kfd_ioctl.h          55
-rw-r--r--   include/uapi/linux/media-bus-format.h    6
3 files changed, 142 insertions, 3 deletions
diff --git a/include/uapi/linux/dma-buf.h b/include/uapi/linux/dma-buf.h
index b1523cb8ab30..5a6fda66d9ad 100644
--- a/include/uapi/linux/dma-buf.h
+++ b/include/uapi/linux/dma-buf.h
@@ -85,6 +85,88 @@ struct dma_buf_sync {
 
 #define DMA_BUF_NAME_LEN	32
 
+/**
+ * struct dma_buf_export_sync_file - Get a sync_file from a dma-buf
+ *
+ * Userspace can perform a DMA_BUF_IOCTL_EXPORT_SYNC_FILE to retrieve the
+ * current set of fences on a dma-buf file descriptor as a sync_file. CPU
+ * waits via poll() or other driver-specific mechanisms typically wait on
+ * whatever fences are on the dma-buf at the time the wait begins. This
+ * is similar except that it takes a snapshot of the current fences on the
+ * dma-buf for waiting later instead of waiting immediately. This is
+ * useful for modern graphics APIs such as Vulkan which assume an explicit
+ * synchronization model but still need to inter-operate with dma-buf.
+ *
+ * The intended usage pattern is the following:
+ *
+ * 1. Export a sync_file with flags corresponding to the expected GPU usage
+ *    via DMA_BUF_IOCTL_EXPORT_SYNC_FILE.
+ *
+ * 2. Submit rendering work which uses the dma-buf. The work should wait on
+ *    the exported sync file before rendering and produce another sync_file
+ *    when complete.
+ *
+ * 3. Import the rendering-complete sync_file into the dma-buf with flags
+ *    corresponding to the GPU usage via DMA_BUF_IOCTL_IMPORT_SYNC_FILE.
+ *
+ * Unlike doing implicit synchronization via a GPU kernel driver's exec ioctl,
+ * the above is not a single atomic operation. If userspace wants to ensure
+ * ordering via these fences, it is the responsibility of userspace to use
+ * locks or other mechanisms to ensure that no other context adds fences or
+ * submits work between steps 1 and 3 above.
+ */
+struct dma_buf_export_sync_file {
+	/**
+	 * @flags: Read/write flags
+	 *
+	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
+	 *
+	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
+	 * the returned sync file waits on any writers of the dma-buf to
+	 * complete. Waiting on the returned sync file is equivalent to
+	 * poll() with POLLIN.
+	 *
+	 * If DMA_BUF_SYNC_WRITE is set, the returned sync file waits on
+	 * any users of the dma-buf (read or write) to complete. Waiting
+	 * on the returned sync file is equivalent to poll() with POLLOUT.
+	 * If both DMA_BUF_SYNC_WRITE and DMA_BUF_SYNC_READ are set, this
+	 * is equivalent to just DMA_BUF_SYNC_WRITE.
+	 */
+	__u32 flags;
+	/** @fd: Returned sync file descriptor */
+	__s32 fd;
+};
+
+/**
+ * struct dma_buf_import_sync_file - Insert a sync_file into a dma-buf
+ *
+ * Userspace can perform a DMA_BUF_IOCTL_IMPORT_SYNC_FILE to insert a
+ * sync_file into a dma-buf for the purposes of implicit synchronization
+ * with other dma-buf consumers. This allows clients using explicitly
+ * synchronized APIs such as Vulkan to inter-op with dma-buf consumers
+ * which expect implicit synchronization such as OpenGL or most media
+ * drivers/video.
+ */
+struct dma_buf_import_sync_file {
+	/**
+	 * @flags: Read/write flags
+	 *
+	 * Must be DMA_BUF_SYNC_READ, DMA_BUF_SYNC_WRITE, or both.
+	 *
+	 * If DMA_BUF_SYNC_READ is set and DMA_BUF_SYNC_WRITE is not set,
+	 * this inserts the sync_file as a read-only fence. Any subsequent
+	 * implicitly synchronized writes to this dma-buf will wait on this
+	 * fence but reads will not.
+	 *
+	 * If DMA_BUF_SYNC_WRITE is set, this inserts the sync_file as a
+	 * write fence. All subsequent implicitly synchronized access to
+	 * this dma-buf will wait on this fence.
+	 */
+	__u32 flags;
+	/** @fd: Sync file descriptor */
+	__s32 fd;
+};
+
 #define DMA_BUF_BASE		'b'
 #define DMA_BUF_IOCTL_SYNC	_IOW(DMA_BUF_BASE, 0, struct dma_buf_sync)
 
@@ -94,5 +176,7 @@ struct dma_buf_sync {
 #define DMA_BUF_SET_NAME	_IOW(DMA_BUF_BASE, 1, const char *)
 #define DMA_BUF_SET_NAME_A	_IOW(DMA_BUF_BASE, 1, __u32)
 #define DMA_BUF_SET_NAME_B	_IOW(DMA_BUF_BASE, 1, __u64)
+#define DMA_BUF_IOCTL_EXPORT_SYNC_FILE	_IOWR(DMA_BUF_BASE, 2, struct dma_buf_export_sync_file)
+#define DMA_BUF_IOCTL_IMPORT_SYNC_FILE	_IOW(DMA_BUF_BASE, 3, struct dma_buf_import_sync_file)
 
 #endif
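The usage pattern documented in the comments above maps onto two small ioctl calls around a driver submission. The following is a minimal sketch under stated assumptions: dmabuf_fd is an existing dma-buf file descriptor, and submit_render_work() is a hypothetical stand-in for whatever driver-specific submission path the application uses, taking a sync_file to wait on and returning a sync_file fd that signals when the rendering completes.

#include <unistd.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Sketch of the export/import flow described above.  submit_render_work()
 * is a hypothetical callback: it waits on the given sync_file before
 * rendering and returns a sync_file fd that signals completion. */
static int render_to_dmabuf(int dmabuf_fd,
			    int (*submit_render_work)(int wait_fence_fd))
{
	struct dma_buf_export_sync_file exp = {
		/* We will write the buffer, so wait on all prior users. */
		.flags = DMA_BUF_SYNC_WRITE,
	};
	struct dma_buf_import_sync_file imp;
	int done_fd;

	/* Step 1: snapshot the fences currently attached to the dma-buf. */
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &exp) < 0) {
		perror("DMA_BUF_IOCTL_EXPORT_SYNC_FILE");
		return -1;
	}

	/* Step 2: submit work that waits on exp.fd and get its completion
	 * fence back as a sync_file. */
	done_fd = submit_render_work(exp.fd);
	close(exp.fd);
	if (done_fd < 0)
		return -1;

	/* Step 3: attach the completion fence to the dma-buf as a write
	 * fence so implicitly synchronized consumers wait on it. */
	imp.flags = DMA_BUF_SYNC_WRITE;
	imp.fd = done_fd;
	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &imp) < 0) {
		perror("DMA_BUF_IOCTL_IMPORT_SYNC_FILE");
		close(done_fd);
		return -1;
	}
	close(done_fd);
	return 0;
}

As the header comment warns, nothing in this sequence is atomic: if another context can attach fences to the dma-buf between steps 1 and 3, the application has to serialize that access itself.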
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 42975e940758..42b60198b6c5 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -34,9 +34,12 @@
  * - 1.6 - Query clear flags in SVM get_attr API
  * - 1.7 - Checkpoint Restore (CRIU) API
  * - 1.8 - CRIU - Support for SDMA transfers with GTT BOs
+ * - 1.9 - Add available memory ioctl
+ * - 1.10 - Add SMI profiler event log
+ * - 1.11 - Add unified memory for ctx save/restore area
  */
 #define KFD_IOCTL_MAJOR_VERSION 1
-#define KFD_IOCTL_MINOR_VERSION 8
+#define KFD_IOCTL_MINOR_VERSION 11
 
 struct kfd_ioctl_get_version_args {
 	__u32 major_version;	/* from KFD */
@@ -100,6 +103,12 @@ struct kfd_ioctl_get_queue_wave_state_args {
 	__u32 pad;
 };
 
+struct kfd_ioctl_get_available_memory_args {
+	__u64 available;	/* from KFD */
+	__u32 gpu_id;		/* to KFD */
+	__u32 pad;
+};
+
 /* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
 #define KFD_IOC_CACHE_POLICY_COHERENT 0
 #define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -463,6 +472,43 @@ enum kfd_smi_event {
 	KFD_SMI_EVENT_THERMAL_THROTTLE = 2,
 	KFD_SMI_EVENT_GPU_PRE_RESET = 3,
 	KFD_SMI_EVENT_GPU_POST_RESET = 4,
+	KFD_SMI_EVENT_MIGRATE_START = 5,
+	KFD_SMI_EVENT_MIGRATE_END = 6,
+	KFD_SMI_EVENT_PAGE_FAULT_START = 7,
+	KFD_SMI_EVENT_PAGE_FAULT_END = 8,
+	KFD_SMI_EVENT_QUEUE_EVICTION = 9,
+	KFD_SMI_EVENT_QUEUE_RESTORE = 10,
+	KFD_SMI_EVENT_UNMAP_FROM_GPU = 11,
+
+	/*
+	 * Max event number, used as a flag bit to request events from all
+	 * processes. This requires super user permission; otherwise no
+	 * events from any process will be received. Without this flag,
+	 * only events from the same process are received.
+	 */
+	KFD_SMI_EVENT_ALL_PROCESS = 64
+};
+
+enum KFD_MIGRATE_TRIGGERS {
+	KFD_MIGRATE_TRIGGER_PREFETCH,
+	KFD_MIGRATE_TRIGGER_PAGEFAULT_GPU,
+	KFD_MIGRATE_TRIGGER_PAGEFAULT_CPU,
+	KFD_MIGRATE_TRIGGER_TTM_EVICTION
+};
+
+enum KFD_QUEUE_EVICTION_TRIGGERS {
+	KFD_QUEUE_EVICTION_TRIGGER_SVM,
+	KFD_QUEUE_EVICTION_TRIGGER_USERPTR,
+	KFD_QUEUE_EVICTION_TRIGGER_TTM,
+	KFD_QUEUE_EVICTION_TRIGGER_SUSPEND,
+	KFD_QUEUE_EVICTION_CRIU_CHECKPOINT,
+	KFD_QUEUE_EVICTION_CRIU_RESTORE
+};
+
+enum KFD_SVM_UNMAP_TRIGGERS {
+	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY,
+	KFD_SVM_UNMAP_TRIGGER_MMU_NOTIFY_MIGRATE,
+	KFD_SVM_UNMAP_TRIGGER_UNMAP_FROM_CPU
 };
 
 #define KFD_SMI_EVENT_MASK_FROM_INDEX(i) (1ULL << ((i) - 1))
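A profiler consuming the new SVM events above would enable them via a bitmask built with the KFD_SMI_EVENT_MASK_FROM_INDEX() helper shown just above. The sketch below mainly illustrates how the mask is composed; obtaining the SMI event file descriptor (through the existing AMDKFD_IOC_SMI_EVENTS ioctl) and writing the 64-bit mask to that descriptor are assumptions about the surrounding SMI interface, not something this hunk defines.

#include <stdint.h>
#include <unistd.h>
#include <linux/kfd_ioctl.h>

/* Sketch: enable the new SVM profiling events on an SMI event fd.
 * smi_event_fd is assumed to come from AMDKFD_IOC_SMI_EVENTS; writing the
 * mask to it is also an assumption about that interface. */
static int enable_svm_profiling_events(int smi_event_fd)
{
	uint64_t mask =
		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_MIGRATE_START) |
		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_MIGRATE_END) |
		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_PAGE_FAULT_START) |
		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_PAGE_FAULT_END) |
		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_QUEUE_EVICTION) |
		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_QUEUE_RESTORE) |
		KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_UNMAP_FROM_GPU);

	/* Hand the mask to the kernel (assumed write-based interface). */
	if (write(smi_event_fd, &mask, sizeof(mask)) != sizeof(mask))
		return -1;
	return 0;
}

A system-wide profiler running with super user permission would additionally OR in KFD_SMI_EVENT_MASK_FROM_INDEX(KFD_SMI_EVENT_ALL_PROCESS) to receive events from all processes rather than just its own.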
@@ -568,6 +614,8 @@ enum kfd_mmio_remap {
 #define KFD_IOCTL_SVM_FLAG_GPU_EXEC            0x00000010
 /* GPUs mostly read, may allow similar optimizations as RO, but writes fault */
 #define KFD_IOCTL_SVM_FLAG_GPU_READ_MOSTLY     0x00000020
+/* Keep GPU memory mapping always valid as if XNACK is disabled */
+#define KFD_IOCTL_SVM_FLAG_GPU_ALWAYS_MAPPED   0x00000040
 
 /**
  * kfd_ioctl_svm_op - SVM ioctl operations
@@ -826,7 +874,10 @@ struct kfd_ioctl_set_xnack_mode_args {
 #define AMDKFD_IOC_CRIU_OP			\
 		AMDKFD_IOWR(0x22, struct kfd_ioctl_criu_args)
 
+#define AMDKFD_IOC_AVAILABLE_MEMORY		\
+		AMDKFD_IOWR(0x23, struct kfd_ioctl_get_available_memory_args)
+
 #define AMDKFD_COMMAND_START		0x01
-#define AMDKFD_COMMAND_END		0x23
+#define AMDKFD_COMMAND_END		0x24
 
 #endif
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 0dfc11ee243a..ec3323dbb927 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -34,7 +34,7 @@
 
 #define MEDIA_BUS_FMT_FIXED			0x0001
 
-/* RGB - next is 0x101e */
+/* RGB - next is 0x1022 */
 #define MEDIA_BUS_FMT_RGB444_1X12		0x1016
 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE	0x1001
 #define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE	0x1002
@@ -59,9 +59,13 @@
 #define MEDIA_BUS_FMT_RGB888_3X8_DELTA		0x101d
 #define MEDIA_BUS_FMT_RGB888_1X7X4_SPWG		0x1011
 #define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA	0x1012
+#define MEDIA_BUS_FMT_RGB666_1X30_CPADLO	0x101e
+#define MEDIA_BUS_FMT_RGB888_1X30_CPADLO	0x101f
 #define MEDIA_BUS_FMT_ARGB8888_1X32		0x100d
 #define MEDIA_BUS_FMT_RGB888_1X32_PADHI		0x100f
 #define MEDIA_BUS_FMT_RGB101010_1X30		0x1018
+#define MEDIA_BUS_FMT_RGB666_1X36_CPADLO	0x1020
+#define MEDIA_BUS_FMT_RGB888_1X36_CPADLO	0x1021
 #define MEDIA_BUS_FMT_RGB121212_1X36		0x1019
 #define MEDIA_BUS_FMT_RGB161616_1X48		0x101a
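The AMDKFD_IOC_AVAILABLE_MEMORY ioctl added to kfd_ioctl.h above takes a gpu_id and returns the driver's estimate of memory still available on that GPU. A minimal sketch of a query, assuming an already open /dev/kfd file descriptor and a gpu_id read from the topology nodes under sysfs (both assumptions, not part of this patch):

#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

/* Sketch: query available memory for one GPU.  kfd_fd is assumed to be an
 * open fd on /dev/kfd, and gpu_id a value read from
 * /sys/class/kfd/kfd/topology/nodes/<N>/gpu_id (assumed, not defined by
 * this patch). */
static int64_t kfd_available_memory(int kfd_fd, uint32_t gpu_id)
{
	struct kfd_ioctl_get_available_memory_args args = {
		.gpu_id = gpu_id,	/* to KFD */
	};

	if (ioctl(kfd_fd, AMDKFD_IOC_AVAILABLE_MEMORY, &args) < 0) {
		perror("AMDKFD_IOC_AVAILABLE_MEMORY");
		return -1;
	}
	return (int64_t)args.available;	/* from KFD */
}

Since this ioctl only exists from KFD interface version 1.9 onward, a caller should check the version reported by AMDKFD_IOC_GET_VERSION before relying on it.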
