path: root/include/drm
Diffstat (limited to 'include/drm')
-rw-r--r--  include/drm/amd_asic_type.h  15
-rw-r--r--  include/drm/drm_atomic.h  4
-rw-r--r--  include/drm/drm_displayid.h  30
-rw-r--r--  include/drm/drm_dp_helper.h  55
-rw-r--r--  include/drm/drm_drv.h  2
-rw-r--r--  include/drm/drm_edid.h  3
-rw-r--r--  include/drm/drm_encoder.h  18
-rw-r--r--  include/drm/drm_gem_atomic_helper.h  113
-rw-r--r--  include/drm/drm_gem_framebuffer_helper.h  7
-rw-r--r--  include/drm/drm_gem_vram_helper.h  6
-rw-r--r--  include/drm/drm_hdcp.h  5
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h  31
-rw-r--r--  include/drm/drm_plane.h  25
-rw-r--r--  include/drm/drm_print.h  20
-rw-r--r--  include/drm/drm_simple_kms_helper.h  29
-rw-r--r--  include/drm/drm_vblank.h  1
-rw-r--r--  include/drm/gpu_scheduler.h  27
-rw-r--r--  include/drm/gud.h  333
-rw-r--r--  include/drm/i915_pciids.h  11
-rw-r--r--  include/drm/ttm/ttm_bo_api.h  50
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h  330
-rw-r--r--  include/drm/ttm/ttm_device.h  317
-rw-r--r--  include/drm/ttm/ttm_memory.h  95
-rw-r--r--  include/drm/ttm/ttm_resource.h  4
-rw-r--r--  include/drm/ttm/ttm_tt.h  14
25 files changed, 999 insertions, 546 deletions
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index cde3c8c9f20c..336e36506910 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -51,13 +51,14 @@ enum amd_asic_type {
CHIP_RAVEN, /* 22 */
CHIP_ARCTURUS, /* 23 */
CHIP_RENOIR, /* 24 */
- CHIP_NAVI10, /* 25 */
- CHIP_NAVI14, /* 26 */
- CHIP_NAVI12, /* 27 */
- CHIP_SIENNA_CICHLID, /* 28 */
- CHIP_NAVY_FLOUNDER, /* 29 */
- CHIP_VANGOGH, /* 30 */
- CHIP_DIMGREY_CAVEFISH, /* 31 */
+ CHIP_ALDEBARAN, /* 25 */
+ CHIP_NAVI10, /* 26 */
+ CHIP_NAVI14, /* 27 */
+ CHIP_NAVI12, /* 28 */
+ CHIP_SIENNA_CICHLID, /* 29 */
+ CHIP_NAVY_FLOUNDER, /* 30 */
+ CHIP_VANGOGH, /* 31 */
+ CHIP_DIMGREY_CAVEFISH, /* 32 */
CHIP_LAST,
};
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index ce7023e9115d..ac5a28eff2c8 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -66,6 +66,8 @@
*
* For an implementation of how to use this look at
* drm_atomic_helper_setup_commit() from the atomic helper library.
+ *
+ * See also drm_crtc_commit_wait().
*/
struct drm_crtc_commit {
/**
@@ -436,6 +438,8 @@ static inline void drm_crtc_commit_put(struct drm_crtc_commit *commit)
kref_put(&commit->ref, __drm_crtc_commit_free);
}
+int drm_crtc_commit_wait(struct drm_crtc_commit *commit);
+
struct drm_atomic_state * __must_check
drm_atomic_state_alloc(struct drm_device *dev);
void drm_atomic_state_clear(struct drm_atomic_state *state);
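
Editor's note: drm_atomic.h gains drm_crtc_commit_wait(). Below is a minimal sketch of how a driver might use it, assuming the commit pointer is taken from an old CRTC state; the sketch_ helper name and calling context are illustrative, not part of this patch.

```c
#include <drm/drm_atomic.h>

static int sketch_wait_for_previous_commit(struct drm_crtc_state *old_crtc_state)
{
	struct drm_crtc_commit *commit = old_crtc_state->commit;

	if (!commit)
		return 0;

	/* Returns 0 once the commit has completed, or a negative error code. */
	return drm_crtc_commit_wait(commit);
}
```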
diff --git a/include/drm/drm_displayid.h b/include/drm/drm_displayid.h
index 77941efb5426..ec64d141f578 100644
--- a/include/drm/drm_displayid.h
+++ b/include/drm/drm_displayid.h
@@ -22,6 +22,10 @@
#ifndef DRM_DISPLAYID_H
#define DRM_DISPLAYID_H
+#include <linux/types.h>
+
+struct edid;
+
#define DATA_BLOCK_PRODUCT_ID 0x00
#define DATA_BLOCK_DISPLAY_PARAMETERS 0x01
#define DATA_BLOCK_COLOR_CHARACTERISTICS 0x02
@@ -52,7 +56,7 @@
#define PRODUCT_TYPE_REPEATER 5
#define PRODUCT_TYPE_DIRECT_DRIVE 6
-struct displayid_hdr {
+struct displayid_header {
u8 rev;
u8 bytes;
u8 prod_id;
@@ -92,12 +96,22 @@ struct displayid_detailed_timing_block {
struct displayid_detailed_timings_1 timings[];
};
-#define for_each_displayid_db(displayid, block, idx, length) \
- for ((block) = (struct displayid_block *)&(displayid)[idx]; \
- (idx) + sizeof(struct displayid_block) <= (length) && \
- (idx) + sizeof(struct displayid_block) + (block)->num_bytes <= (length) && \
- (block)->num_bytes > 0; \
- (idx) += sizeof(struct displayid_block) + (block)->num_bytes, \
- (block) = (struct displayid_block *)&(displayid)[idx])
+/* DisplayID iteration */
+struct displayid_iter {
+ const struct edid *edid;
+
+ const u8 *section;
+ int length;
+ int idx;
+ int ext_index;
+};
+
+void displayid_iter_edid_begin(const struct edid *edid,
+ struct displayid_iter *iter);
+const struct displayid_block *
+__displayid_iter_next(struct displayid_iter *iter);
+#define displayid_iter_for_each(__block, __iter) \
+ while (((__block) = __displayid_iter_next(__iter)))
+void displayid_iter_end(struct displayid_iter *iter);
#endif
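
Editor's note: drm_displayid.h replaces the open-coded for_each_displayid_db() macro with an iterator. A minimal sketch using only the declarations added above; what is done with each block is left as a placeholder.

```c
#include <drm/drm_displayid.h>

static void sketch_walk_displayid_blocks(const struct edid *edid)
{
	const struct displayid_block *block;
	struct displayid_iter iter;

	displayid_iter_edid_begin(edid, &iter);
	displayid_iter_for_each(block, &iter) {
		/* Inspect block->num_bytes etc. for each DisplayID data block. */
	}
	displayid_iter_end(&iter);
}
```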
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index edffd1dcca3e..1e85c2021f2f 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1016,6 +1016,11 @@ struct drm_device;
#define DP_EDP_REGIONAL_BACKLIGHT_BASE 0x740 /* eDP 1.4 */
#define DP_EDP_REGIONAL_BACKLIGHT_0 0x741 /* eDP 1.4 */
+#define DP_EDP_MSO_LINK_CAPABILITIES 0x7a4 /* eDP 1.4 */
+# define DP_EDP_MSO_NUMBER_OF_LINKS_MASK (7 << 0)
+# define DP_EDP_MSO_NUMBER_OF_LINKS_SHIFT 0
+# define DP_EDP_MSO_INDEPENDENT_LINK_BIT (1 << 3)
+
/* Sideband MSG Buffers */
#define DP_SIDEBAND_MSG_DOWN_REQ_BASE 0x1000 /* 1.2 MST */
#define DP_SIDEBAND_MSG_UP_REP_BASE 0x1200 /* 1.2 MST */
@@ -1171,6 +1176,7 @@ struct drm_device;
# define DP_PCON_ENABLE_MAX_BW_48GBPS 6
# define DP_PCON_ENABLE_SOURCE_CTL_MODE (1 << 3)
# define DP_PCON_ENABLE_CONCURRENT_LINK (1 << 4)
+# define DP_PCON_ENABLE_SEQUENTIAL_LINK (0 << 4)
# define DP_PCON_ENABLE_LINK_FRL_MODE (1 << 5)
# define DP_PCON_ENABLE_HPD_READY (1 << 6)
# define DP_PCON_ENABLE_HDMI_LINK (1 << 7)
@@ -1185,6 +1191,7 @@ struct drm_device;
# define DP_PCON_FRL_BW_MASK_40GBPS (1 << 4)
# define DP_PCON_FRL_BW_MASK_48GBPS (1 << 5)
# define DP_PCON_FRL_LINK_TRAIN_EXTENDED (1 << 6)
+# define DP_PCON_FRL_LINK_TRAIN_NORMAL (0 << 6)
/* PCON HDMI LINK STATUS */
#define DP_PCON_HDMI_TX_LINK_STATUS 0x303B
@@ -1839,34 +1846,34 @@ struct drm_dp_aux_cec {
* @crc_count: counter of captured frame CRCs
* @transfer: transfers a message representing a single AUX transaction
*
- * The .dev field should be set to a pointer to the device that implements
- * the AUX channel.
+ * The @dev field should be set to a pointer to the device that implements the
+ * AUX channel.
*
- * The .name field may be used to specify the name of the I2C adapter. If set to
- * NULL, dev_name() of .dev will be used.
+ * The @name field may be used to specify the name of the I2C adapter. If set to
+ * %NULL, dev_name() of @dev will be used.
*
- * Drivers provide a hardware-specific implementation of how transactions
- * are executed via the .transfer() function. A pointer to a drm_dp_aux_msg
+ * Drivers provide a hardware-specific implementation of how transactions are
+ * executed via the @transfer() function. A pointer to a &drm_dp_aux_msg
* structure describing the transaction is passed into this function. Upon
- * success, the implementation should return the number of payload bytes
- * that were transferred, or a negative error-code on failure. Helpers
- * propagate errors from the .transfer() function, with the exception of
- * the -EBUSY error, which causes a transaction to be retried. On a short,
- * helpers will return -EPROTO to make it simpler to check for failure.
+ * success, the implementation should return the number of payload bytes that
+ * were transferred, or a negative error-code on failure. Helpers propagate
+ * errors from the @transfer() function, with the exception of the %-EBUSY
+ * error, which causes a transaction to be retried. On a short, helpers will
+ * return %-EPROTO to make it simpler to check for failure.
*
* An AUX channel can also be used to transport I2C messages to a sink. A
- * typical application of that is to access an EDID that's present in the
- * sink device. The .transfer() function can also be used to execute such
- * transactions. The drm_dp_aux_register() function registers an I2C
- * adapter that can be passed to drm_probe_ddc(). Upon removal, drivers
- * should call drm_dp_aux_unregister() to remove the I2C adapter.
- * The I2C adapter uses long transfers by default; if a partial response is
- * received, the adapter will drop down to the size given by the partial
- * response for this transaction only.
+ * typical application of that is to access an EDID that's present in the sink
+ * device. The @transfer() function can also be used to execute such
+ * transactions. The drm_dp_aux_register() function registers an I2C adapter
+ * that can be passed to drm_probe_ddc(). Upon removal, drivers should call
+ * drm_dp_aux_unregister() to remove the I2C adapter. The I2C adapter uses long
+ * transfers by default; if a partial response is received, the adapter will
+ * drop down to the size given by the partial response for this transaction
+ * only.
*
- * Note that the aux helper code assumes that the .transfer() function
- * only modifies the reply field of the drm_dp_aux_msg structure. The
- * retry logic and i2c helpers assume this is the case.
+ * Note that the aux helper code assumes that the @transfer() function only
+ * modifies the reply field of the &drm_dp_aux_msg structure. The retry logic
+ * and i2c helpers assume this is the case.
*/
struct drm_dp_aux {
const char *name;
@@ -2149,9 +2156,9 @@ int drm_dp_get_pcon_max_frl_bw(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd);
bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux);
int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps,
- bool concurrent_mode);
+ u8 frl_mode);
int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask,
- bool extended_train_mode);
+ u8 frl_type);
int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux);
int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux);
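
Editor's note: the FRL configuration helpers now take the mode/type byte instead of a bool. A hedged sketch of a call sequence with the new encodings; the bandwidth values and the sketch_ wrapper are arbitrary examples, not requirements of this patch.

```c
#include <drm/drm_dp_helper.h>

static int sketch_pcon_frl_setup(struct drm_dp_aux *aux)
{
	int ret;

	/* Sequential link mode replaces the old bool concurrent_mode. */
	ret = drm_dp_pcon_frl_configure_1(aux, 40, DP_PCON_ENABLE_SEQUENTIAL_LINK);
	if (ret < 0)
		return ret;

	/* Normal link training replaces the old bool extended_train_mode. */
	return drm_dp_pcon_frl_configure_2(aux, DP_PCON_FRL_BW_MASK_40GBPS,
					   DP_PCON_FRL_LINK_TRAIN_NORMAL);
}
```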
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 827838e0a97e..b439ae1921b8 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -74,7 +74,7 @@ enum drm_driver_feature {
* @DRIVER_ATOMIC:
*
* Driver supports the full atomic modesetting userspace API. Drivers
- * which only use atomic internally, but do not the support the full
+ * which only use atomic internally, but do not support the full
* userspace API (e.g. not all properties converted to atomic, or
* multi-plane updates are not guaranteed to be tear-free) should not
* set this flag.
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index a158f585f658..759328a5eeb2 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -543,5 +543,8 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
struct drm_display_mode *
drm_display_mode_from_cea_vic(struct drm_device *dev,
u8 video_code);
+const u8 *drm_find_edid_extension(const struct edid *edid,
+ int ext_id, int *ext_index);
+
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 5bf78b5bcb2b..6e91a0280f31 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -225,6 +225,24 @@ void *__drmm_encoder_alloc(struct drm_device *dev,
encoder_type, name, ##__VA_ARGS__))
/**
+ * drmm_plain_encoder_alloc - Allocate and initialize an encoder
+ * @dev: drm device
+ * @funcs: callbacks for this encoder (optional)
+ * @encoder_type: user visible type of the encoder
+ * @name: printf style format string for the encoder name, or NULL for default name
+ *
+ * This is a simplified version of drmm_encoder_alloc(), which only allocates
+ * and returns a struct drm_encoder instance, with no subclassing.
+ *
+ * Returns:
+ * Pointer to the new drm_encoder struct, or ERR_PTR on failure.
+ */
+#define drmm_plain_encoder_alloc(dev, funcs, encoder_type, name, ...) \
+ ((struct drm_encoder *) \
+ __drmm_encoder_alloc(dev, sizeof(struct drm_encoder), \
+ 0, funcs, encoder_type, name, ##__VA_ARGS__))
+
+/**
* drm_encoder_index - find the index of a registered encoder
* @encoder: encoder to find index for
*
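
Editor's note: a minimal sketch of the new drmm_plain_encoder_alloc() helper. The funcs pointer is optional per the kerneldoc, so NULL is passed; the encoder type and the sketch_ wrapper are only examples. The returned pointer is ERR_PTR-encoded on failure, as documented above.

```c
#include <drm/drm_encoder.h>

static struct drm_encoder *sketch_create_encoder(struct drm_device *dev)
{
	/* funcs is optional; a NULL name selects the default encoder name. */
	return drmm_plain_encoder_alloc(dev, NULL, DRM_MODE_ENCODER_NONE, NULL);
}
```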
diff --git a/include/drm/drm_gem_atomic_helper.h b/include/drm/drm_gem_atomic_helper.h
new file mode 100644
index 000000000000..cfc5adee3d13
--- /dev/null
+++ b/include/drm/drm_gem_atomic_helper.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef __DRM_GEM_ATOMIC_HELPER_H__
+#define __DRM_GEM_ATOMIC_HELPER_H__
+
+#include <linux/dma-buf-map.h>
+
+#include <drm/drm_plane.h>
+
+struct drm_simple_display_pipe;
+
+/*
+ * Plane Helpers
+ */
+
+int drm_gem_plane_helper_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state);
+int drm_gem_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+/*
+ * Helpers for planes with shadow buffers
+ */
+
+/**
+ * struct drm_shadow_plane_state - plane state for planes with shadow buffers
+ *
+ * For planes that use a shadow buffer, struct drm_shadow_plane_state
+ * provides the regular plane state plus mappings of the shadow buffer
+ * into kernel address space.
+ */
+struct drm_shadow_plane_state {
+ /** @base: plane state */
+ struct drm_plane_state base;
+
+ /* Transitional state - do not export or duplicate */
+
+ /**
+ * @map: Mappings of the plane's framebuffer BOs in to kernel address space
+ *
+ * The memory mappings stored in map should be established in the plane's
+ * prepare_fb callback and removed in the cleanup_fb callback.
+ */
+ struct dma_buf_map map[4];
+};
+
+/**
+ * to_drm_shadow_plane_state - upcasts from struct drm_plane_state
+ * @state: the plane state
+ */
+static inline struct drm_shadow_plane_state *
+to_drm_shadow_plane_state(struct drm_plane_state *state)
+{
+ return container_of(state, struct drm_shadow_plane_state, base);
+}
+
+void drm_gem_reset_shadow_plane(struct drm_plane *plane);
+struct drm_plane_state *drm_gem_duplicate_shadow_plane_state(struct drm_plane *plane);
+void drm_gem_destroy_shadow_plane_state(struct drm_plane *plane,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_plane_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_funcs to use the rsp helper functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_FUNCS \
+ .reset = drm_gem_reset_shadow_plane, \
+ .atomic_duplicate_state = drm_gem_duplicate_shadow_plane_state, \
+ .atomic_destroy_state = drm_gem_destroy_shadow_plane_state
+
+int drm_gem_prepare_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+void drm_gem_cleanup_shadow_fb(struct drm_plane *plane, struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SHADOW_PLANE_HELPER_FUNCS -
+ * Initializes struct drm_plane_helper_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_plane_helper_funcs to use the rsp helper
+ * functions.
+ */
+#define DRM_GEM_SHADOW_PLANE_HELPER_FUNCS \
+ .prepare_fb = drm_gem_prepare_shadow_fb, \
+ .cleanup_fb = drm_gem_cleanup_shadow_fb
+
+int drm_gem_simple_kms_prepare_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_cleanup_shadow_fb(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+void drm_gem_simple_kms_reset_shadow_plane(struct drm_simple_display_pipe *pipe);
+struct drm_plane_state *
+drm_gem_simple_kms_duplicate_shadow_plane_state(struct drm_simple_display_pipe *pipe);
+void drm_gem_simple_kms_destroy_shadow_plane_state(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
+
+/**
+ * DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS -
+ * Initializes struct drm_simple_display_pipe_funcs for shadow-buffered planes
+ *
+ * Drivers may use GEM BOs as shadow buffers over the framebuffer memory. This
+ * macro initializes struct drm_simple_display_pipe_funcs to use the rsp helper
+ * functions.
+ */
+#define DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS \
+ .prepare_fb = drm_gem_simple_kms_prepare_shadow_fb, \
+ .cleanup_fb = drm_gem_simple_kms_cleanup_shadow_fb, \
+ .reset_plane = drm_gem_simple_kms_reset_shadow_plane, \
+ .duplicate_plane_state = drm_gem_simple_kms_duplicate_shadow_plane_state, \
+ .destroy_plane_state = drm_gem_simple_kms_destroy_shadow_plane_state
+
+#endif /* __DRM_GEM_ATOMIC_HELPER_H__ */
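
Editor's note: a hedged sketch of how a driver with a GEM shadow buffer might wire up the new helpers. The non-macro callbacks shown are the usual atomic helpers or driver-provided placeholders; the sketch_ names are illustrative.

```c
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_modeset_helper_vtables.h>

static const struct drm_plane_funcs sketch_shadow_plane_funcs = {
	.update_plane	= drm_atomic_helper_update_plane,
	.disable_plane	= drm_atomic_helper_disable_plane,
	.destroy	= drm_plane_cleanup,
	DRM_GEM_SHADOW_PLANE_FUNCS,	/* reset/duplicate/destroy state */
};

static const struct drm_plane_helper_funcs sketch_shadow_plane_helper_funcs = {
	DRM_GEM_SHADOW_PLANE_HELPER_FUNCS,	/* prepare_fb/cleanup_fb */
	/* .atomic_check and .atomic_update are supplied by the driver */
};
```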
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index 6b013154911d..6bdffc7aa124 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -9,9 +9,6 @@ struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
struct drm_mode_fb_cmd2;
-struct drm_plane;
-struct drm_plane_state;
-struct drm_simple_display_pipe;
#define AFBC_VENDOR_AND_TYPE_MASK GENMASK_ULL(63, 52)
@@ -44,8 +41,4 @@ int drm_gem_fb_afbc_init(struct drm_device *dev,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb);
-int drm_gem_fb_prepare_fb(struct drm_plane *plane,
- struct drm_plane_state *state);
-int drm_gem_fb_simple_display_pipe_prepare_fb(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index a4bac02249c2..288055d397d9 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -172,19 +172,19 @@ struct drm_vram_mm {
uint64_t vram_base;
size_t vram_size;
- struct ttm_bo_device bdev;
+ struct ttm_device bdev;
};
/**
* drm_vram_mm_of_bdev() - \
- Returns the container of type &struct ttm_bo_device for field bdev.
+ Returns the container of type &struct ttm_device for field bdev.
* @bdev: the TTM BO device
*
* Returns:
* The containing instance of &struct drm_vram_mm
*/
static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
- struct ttm_bo_device *bdev)
+ struct ttm_device *bdev)
{
return container_of(bdev, struct drm_vram_mm, bdev);
}
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index ac22c246542a..0b1111e3228e 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -224,11 +224,14 @@ struct hdcp2_rep_stream_ready {
/* HDCP2.2 TIMEOUTs in mSec */
#define HDCP_2_2_CERT_TIMEOUT_MS 100
+#define HDCP_2_2_DP_CERT_READ_TIMEOUT_MS 110
#define HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS 1000
#define HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS 200
+#define HDCP_2_2_DP_HPRIME_READ_TIMEOUT_MS 7
#define HDCP_2_2_PAIRING_TIMEOUT_MS 200
+#define HDCP_2_2_DP_PAIRING_READ_TIMEOUT_MS 5
#define HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS 20
-#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 7
+#define HDCP_2_2_DP_LPRIME_TIMEOUT_MS 16
#define HDCP_2_2_RECVID_LIST_TIMEOUT_MS 3000
#define HDCP_2_2_STREAM_READY_TIMEOUT_MS 100
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index eb706342861d..f3a4b47b3986 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -1179,7 +1179,7 @@ struct drm_plane_helper_funcs {
* members in the plane structure.
*
* Drivers which always have their buffers pinned should use
- * drm_gem_fb_prepare_fb() for this hook.
+ * drm_gem_plane_helper_prepare_fb() for this hook.
*
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
@@ -1233,9 +1233,8 @@ struct drm_plane_helper_funcs {
* NOTE:
*
* This function is called in the check phase of an atomic update. The
- * driver is not allowed to change anything outside of the free-standing
- * state objects passed-in or assembled in the overall &drm_atomic_state
- * update tracking structure.
+ * driver is not allowed to change anything outside of the
+ * &drm_atomic_state update tracking structure.
*
* RETURNS:
*
@@ -1245,7 +1244,7 @@ struct drm_plane_helper_funcs {
* deadlock.
*/
int (*atomic_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_update:
@@ -1263,7 +1262,7 @@ struct drm_plane_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_update)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
/**
* @atomic_disable:
*
@@ -1287,14 +1286,14 @@ struct drm_plane_helper_funcs {
* transitional plane helpers, but it is optional.
*/
void (*atomic_disable)(struct drm_plane *plane,
- struct drm_plane_state *old_state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_check:
*
- * Drivers should set this function pointer to check if the plane state
- * can be updated in a async fashion. Here async means "not vblank
- * synchronized".
+ * Drivers should set this function pointer to check if the plane's
+ * atomic state can be updated in a async fashion. Here async means
+ * "not vblank synchronized".
*
* This hook is called by drm_atomic_async_check() to establish if a
* given update can be committed asynchronously, that is, if it can
@@ -1306,7 +1305,7 @@ struct drm_plane_helper_funcs {
* can not be applied in asynchronous manner.
*/
int (*atomic_async_check)(struct drm_plane *plane,
- struct drm_plane_state *state);
+ struct drm_atomic_state *state);
/**
* @atomic_async_update:
@@ -1322,11 +1321,9 @@ struct drm_plane_helper_funcs {
* update won't happen if there is an outstanding commit modifying
* the same plane.
*
- * Note that unlike &drm_plane_helper_funcs.atomic_update this hook
- * takes the new &drm_plane_state as parameter. When doing async_update
- * drivers shouldn't replace the &drm_plane_state but update the
- * current one with the new plane configurations in the new
- * plane_state.
+ * When doing async_update drivers shouldn't replace the
+ * &drm_plane_state but update the current one with the new plane
+ * configurations in the new plane_state.
*
* Drivers should also swap the framebuffers between current plane
* state (&drm_plane.state) and new_state.
@@ -1345,7 +1342,7 @@ struct drm_plane_helper_funcs {
* for deferring if needed, until a common solution is created.
*/
void (*atomic_async_update)(struct drm_plane *plane,
- struct drm_plane_state *new_state);
+ struct drm_atomic_state *state);
};
/**
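
Editor's note: with the plane helper hooks now taking the full &drm_atomic_state, drivers look up their own plane states from it. A minimal sketch of an atomic_update under the new prototype; the body is a placeholder.

```c
#include <drm/drm_atomic.h>

static void sketch_plane_atomic_update(struct drm_plane *plane,
				       struct drm_atomic_state *state)
{
	struct drm_plane_state *new_state =
		drm_atomic_get_new_plane_state(state, plane);
	struct drm_plane_state *old_state =
		drm_atomic_get_old_plane_state(state, plane);

	/* Program the hardware from new_state; old_state is for comparison. */
	(void)new_state;
	(void)old_state;
}
```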
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 8ef06ee1c8eb..1294610e84f4 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -79,8 +79,8 @@ struct drm_plane_state {
* preserved.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
- * and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
+ * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_plane_helper_prepare_fb()
+ * and drm_gem_simple_display_pipe_prepare_fb() for suitable helpers.
*/
struct dma_fence *fence;
@@ -538,10 +538,14 @@ struct drm_plane_funcs {
*
* For compatibility with legacy userspace, only overlay planes are made
* available to userspace by default. Userspace clients may set the
- * DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
+ * &DRM_CLIENT_CAP_UNIVERSAL_PLANES client capability bit to indicate that they
* wish to receive a universal plane list containing all plane types. See also
* drm_for_each_legacy_plane().
*
+ * In addition to setting each plane's type, drivers need to setup the
+ * &drm_crtc.primary and optionally &drm_crtc.cursor pointers for legacy
+ * IOCTLs. See drm_crtc_init_with_planes().
+ *
* WARNING: The values of this enum is UABI since they're exposed in the "type"
* property.
*/
@@ -557,19 +561,20 @@ enum drm_plane_type {
/**
* @DRM_PLANE_TYPE_PRIMARY:
*
- * Primary planes represent a "main" plane for a CRTC. Primary planes
- * are the planes operated upon by CRTC modesetting and flipping
- * operations described in the &drm_crtc_funcs.page_flip and
- * &drm_crtc_funcs.set_config hooks.
+ * A primary plane attached to a CRTC is the most likely to be able to
+ * light up the CRTC when no scaling/cropping is used and the plane
+ * covers the whole CRTC.
*/
DRM_PLANE_TYPE_PRIMARY,
/**
* @DRM_PLANE_TYPE_CURSOR:
*
- * Cursor planes represent a "cursor" plane for a CRTC. Cursor planes
- * are the planes operated upon by the DRM_IOCTL_MODE_CURSOR and
- * DRM_IOCTL_MODE_CURSOR2 IOCTLs.
+ * A cursor plane attached to a CRTC is more likely to be able to be
+ * enabled when no scaling/cropping is used and the framebuffer has the
+ * size indicated by &drm_mode_config.cursor_width and
+ * &drm_mode_config.cursor_height. Additionally, if the driver doesn't
+ * support modifiers, the framebuffer should have a linear layout.
*/
DRM_PLANE_TYPE_CURSOR,
};
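
Editor's note: the new paragraph above points drivers at drm_crtc_init_with_planes() for wiring &drm_crtc.primary and optionally &drm_crtc.cursor. A hedged sketch of such a call; the sketch_ wrapper and the NULL default name are illustrative assumptions.

```c
#include <drm/drm_crtc.h>

static int sketch_register_crtc(struct drm_device *dev, struct drm_crtc *crtc,
				struct drm_plane *primary,
				struct drm_plane *cursor,
				const struct drm_crtc_funcs *funcs)
{
	/* Sets crtc->primary (and crtc->cursor) for the legacy IOCTL paths. */
	return drm_crtc_init_with_planes(dev, crtc, primary, cursor, funcs,
					 NULL);
}
```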
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index f32d179e139d..a3c58c941bdc 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -524,16 +524,20 @@ void __drm_err(const char *format, ...);
#define DRM_DEBUG_DP(fmt, ...) \
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
-
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
-({ \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- drm_dev_dbg(NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__); \
+#define __DRM_DEFINE_DBG_RATELIMITED(category, drm, fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(rs_, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST);\
+ const struct drm_device *drm_ = (drm); \
+ \
+ if (drm_debug_enabled(DRM_UT_ ## category) && __ratelimit(&rs_)) \
+ drm_dev_printk(drm_ ? drm_->dev : NULL, KERN_DEBUG, fmt, ## __VA_ARGS__); \
})
+#define drm_dbg_kms_ratelimited(drm, fmt, ...) \
+ __DRM_DEFINE_DBG_RATELIMITED(KMS, drm, fmt, ## __VA_ARGS__)
+
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) drm_dbg_kms_ratelimited(NULL, fmt, ## __VA_ARGS__)
+
/*
* struct drm_device based WARNs
*
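
Editor's note: a minimal usage sketch of the new device-aware ratelimited KMS debug macro; the message, condition and sketch_ helper are illustrative only.

```c
#include <drm/drm_device.h>
#include <drm/drm_print.h>

static void sketch_report_dp_error(struct drm_device *drm, int err)
{
	if (err < 0)
		drm_dbg_kms_ratelimited(drm, "sink read failed: %d\n", err);
}
```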
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index e6dbf3161c2f..ef9944e9c5fc 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -117,7 +117,7 @@ struct drm_simple_display_pipe_funcs {
* more details.
*
* Drivers which always have their buffers pinned should use
- * drm_gem_fb_simple_display_pipe_prepare_fb() for this hook.
+ * drm_gem_simple_display_pipe_prepare_fb() for this hook.
*/
int (*prepare_fb)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *plane_state);
@@ -149,6 +149,33 @@ struct drm_simple_display_pipe_funcs {
* more details.
*/
void (*disable_vblank)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @reset_plane:
+ *
+ * Optional, called by &drm_plane_funcs.reset. Please read the
+ * documentation for the &drm_plane_funcs.reset hook for more details.
+ */
+ void (*reset_plane)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @duplicate_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_duplicate_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_duplicate_state
+ * hook for more details.
+ */
+ struct drm_plane_state * (*duplicate_plane_state)(struct drm_simple_display_pipe *pipe);
+
+ /**
+ * @destroy_plane_state:
+ *
+ * Optional, called by &drm_plane_funcs.atomic_destroy_state. Please
+ * read the documentation for the &drm_plane_funcs.atomic_destroy_state
+ * hook for more details.
+ */
+ void (*destroy_plane_state)(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state);
};
/**
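
Editor's note: the new plane-state hooks pair with the shadow-plane helpers added earlier. A minimal sketch of a pipe funcs table delegating to them; .enable, .disable and .update would come from the driver and are omitted here.

```c
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_simple_kms_helper.h>

static const struct drm_simple_display_pipe_funcs sketch_pipe_funcs = {
	/* .enable, .disable and .update provided by the driver */
	DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
};
```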
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index dd125f8c766c..733a3e2d1d10 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -247,7 +247,6 @@ void drm_crtc_vblank_off(struct drm_crtc *crtc);
void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
-void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 975e8a67947f..10225a0a35d0 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -206,6 +206,12 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
return s_job && atomic_inc_return(&s_job->karma) > threshold;
}
+enum drm_gpu_sched_stat {
+ DRM_GPU_SCHED_STAT_NONE, /* Reserve 0 */
+ DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_ENODEV,
+};
+
/**
* struct drm_sched_backend_ops
*
@@ -230,10 +236,16 @@ struct drm_sched_backend_ops {
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
/**
- * @timedout_job: Called when a job has taken too long to execute,
- * to trigger GPU recovery.
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
+ *
+ * Return DRM_GPU_SCHED_STAT_NOMINAL, when all is normal,
+ * and the underlying driver has started or completed recovery.
+ *
+ * Return DRM_GPU_SCHED_STAT_ENODEV, if the device is no longer
+ * available, i.e. has been unplugged.
*/
- void (*timedout_job)(struct drm_sched_job *sched_job);
+ enum drm_gpu_sched_stat (*timedout_job)(struct drm_sched_job *sched_job);
/**
* @free_job: Called once the job's finished fence has been signaled
@@ -265,6 +277,7 @@ struct drm_sched_backend_ops {
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
* @score: score to help loadbalancer pick a idle sched
+ * @_score: score used when the driver doesn't provide one
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hit to time out handler to free the guilty job.
*
@@ -285,7 +298,8 @@ struct drm_gpu_scheduler {
struct list_head pending_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t score;
+ atomic_t *score;
+ atomic_t _score;
bool ready;
bool free_guilty;
};
@@ -293,7 +307,7 @@ struct drm_gpu_scheduler {
int drm_sched_init(struct drm_gpu_scheduler *sched,
const struct drm_sched_backend_ops *ops,
uint32_t hw_submission, unsigned hang_limit, long timeout,
- const char *name);
+ atomic_t *score, const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
@@ -308,7 +322,10 @@ void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
+void drm_sched_resubmit_jobs_ext(struct drm_gpu_scheduler *sched, int max);
void drm_sched_increase_karma(struct drm_sched_job *bad);
+void drm_sched_reset_karma(struct drm_sched_job *bad);
+void drm_sched_increase_karma_ext(struct drm_sched_job *bad, int type);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
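
Editor's note: timedout_job now returns an enum drm_gpu_sched_stat. A hedged sketch of a callback under the new prototype; sketch_gpu_reset() stands in for a driver's recovery path and is an assumption.

```c
#include <drm/gpu_scheduler.h>

/* Hypothetical driver recovery helper, assumed for the example. */
static bool sketch_gpu_reset(struct drm_sched_job *sched_job)
{
	return true;
}

static enum drm_gpu_sched_stat
sketch_timedout_job(struct drm_sched_job *sched_job)
{
	if (sketch_gpu_reset(sched_job))
		return DRM_GPU_SCHED_STAT_NOMINAL;

	/* Device no longer available, e.g. it has been unplugged. */
	return DRM_GPU_SCHED_STAT_ENODEV;
}
```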
diff --git a/include/drm/gud.h b/include/drm/gud.h
new file mode 100644
index 000000000000..0b46b54fe56e
--- /dev/null
+++ b/include/drm/gud.h
@@ -0,0 +1,333 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2020 Noralf Trønnes
+ */
+
+#ifndef __LINUX_GUD_H
+#define __LINUX_GUD_H
+
+#include <linux/types.h>
+
+/*
+ * struct gud_display_descriptor_req - Display descriptor
+ * @magic: Magic value GUD_DISPLAY_MAGIC
+ * @version: Protocol version
+ * @flags: Flags
+ * - STATUS_ON_SET: Always do a status request after a SET request.
+ * This is used by the Linux gadget driver since it has
+ * no way to control the status stage of a control OUT
+ * request that has a payload.
+ * - FULL_UPDATE: Always send the entire framebuffer when flushing changes.
+ * The GUD_REQ_SET_BUFFER request will not be sent
+ * before each bulk transfer, it will only be sent if the
+ * previous bulk transfer had failed. This gives the device
+ * a chance to reset its state machine if needed.
+ * This flag can not be used in combination with compression.
+ * @compression: Supported compression types
+ * - GUD_COMPRESSION_LZ4: LZ4 lossless compression.
+ * @max_buffer_size: Maximum buffer size the device can handle (optional).
+ * This is useful for devices that don't have a big enough
+ * buffer to decompress the entire framebuffer in one go.
+ * @min_width: Minimum pixel width the controller can handle
+ * @max_width: Maximum width
+ * @min_height: Minimum height
+ * @max_height: Maximum height
+ *
+ * Devices that have only one display mode will have min_width == max_width
+ * and min_height == max_height.
+ */
+struct gud_display_descriptor_req {
+ __le32 magic;
+#define GUD_DISPLAY_MAGIC 0x1d50614d
+ __u8 version;
+ __le32 flags;
+#define GUD_DISPLAY_FLAG_STATUS_ON_SET BIT(0)
+#define GUD_DISPLAY_FLAG_FULL_UPDATE BIT(1)
+ __u8 compression;
+#define GUD_COMPRESSION_LZ4 BIT(0)
+ __le32 max_buffer_size;
+ __le32 min_width;
+ __le32 max_width;
+ __le32 min_height;
+ __le32 max_height;
+} __packed;
+
+/*
+ * struct gud_property_req - Property
+ * @prop: Property
+ * @val: Value
+ */
+struct gud_property_req {
+ __le16 prop;
+ __le64 val;
+} __packed;
+
+/*
+ * struct gud_display_mode_req - Display mode
+ * @clock: Pixel clock in kHz
+ * @hdisplay: Horizontal display size
+ * @hsync_start: Horizontal sync start
+ * @hsync_end: Horizontal sync end
+ * @htotal: Horizontal total size
+ * @vdisplay: Vertical display size
+ * @vsync_start: Vertical sync start
+ * @vsync_end: Vertical sync end
+ * @vtotal: Vertical total size
+ * @flags: Bits 0-13 are the same as in the RandR protocol and also what DRM uses.
+ * The deprecated bits are reused for internal protocol flags leaving us
+ * free to follow DRM for the other bits in the future.
+ * - FLAG_PREFERRED: Set on the preferred display mode.
+ */
+struct gud_display_mode_req {
+ __le32 clock;
+ __le16 hdisplay;
+ __le16 hsync_start;
+ __le16 hsync_end;
+ __le16 htotal;
+ __le16 vdisplay;
+ __le16 vsync_start;
+ __le16 vsync_end;
+ __le16 vtotal;
+ __le32 flags;
+#define GUD_DISPLAY_MODE_FLAG_PHSYNC BIT(0)
+#define GUD_DISPLAY_MODE_FLAG_NHSYNC BIT(1)
+#define GUD_DISPLAY_MODE_FLAG_PVSYNC BIT(2)
+#define GUD_DISPLAY_MODE_FLAG_NVSYNC BIT(3)
+#define GUD_DISPLAY_MODE_FLAG_INTERLACE BIT(4)
+#define GUD_DISPLAY_MODE_FLAG_DBLSCAN BIT(5)
+#define GUD_DISPLAY_MODE_FLAG_CSYNC BIT(6)
+#define GUD_DISPLAY_MODE_FLAG_PCSYNC BIT(7)
+#define GUD_DISPLAY_MODE_FLAG_NCSYNC BIT(8)
+#define GUD_DISPLAY_MODE_FLAG_HSKEW BIT(9)
+/* BCast and PixelMultiplex are deprecated */
+#define GUD_DISPLAY_MODE_FLAG_DBLCLK BIT(12)
+#define GUD_DISPLAY_MODE_FLAG_CLKDIV2 BIT(13)
+#define GUD_DISPLAY_MODE_FLAG_USER_MASK \
+ (GUD_DISPLAY_MODE_FLAG_PHSYNC | GUD_DISPLAY_MODE_FLAG_NHSYNC | \
+ GUD_DISPLAY_MODE_FLAG_PVSYNC | GUD_DISPLAY_MODE_FLAG_NVSYNC | \
+ GUD_DISPLAY_MODE_FLAG_INTERLACE | GUD_DISPLAY_MODE_FLAG_DBLSCAN | \
+ GUD_DISPLAY_MODE_FLAG_CSYNC | GUD_DISPLAY_MODE_FLAG_PCSYNC | \
+ GUD_DISPLAY_MODE_FLAG_NCSYNC | GUD_DISPLAY_MODE_FLAG_HSKEW | \
+ GUD_DISPLAY_MODE_FLAG_DBLCLK | GUD_DISPLAY_MODE_FLAG_CLKDIV2)
+/* Internal protocol flags */
+#define GUD_DISPLAY_MODE_FLAG_PREFERRED BIT(10)
+} __packed;
+
+/*
+ * struct gud_connector_descriptor_req - Connector descriptor
+ * @connector_type: Connector type (GUD_CONNECTOR_TYPE_*).
+ * If the host doesn't support the type it should fall back to PANEL.
+ * @flags: Flags
+ * - POLL_STATUS: Connector status can change (polled every 10 seconds)
+ * - INTERLACE: Interlaced modes are supported
+ * - DOUBLESCAN: Doublescan modes are supported
+ */
+struct gud_connector_descriptor_req {
+ __u8 connector_type;
+#define GUD_CONNECTOR_TYPE_PANEL 0
+#define GUD_CONNECTOR_TYPE_VGA 1
+#define GUD_CONNECTOR_TYPE_COMPOSITE 2
+#define GUD_CONNECTOR_TYPE_SVIDEO 3
+#define GUD_CONNECTOR_TYPE_COMPONENT 4
+#define GUD_CONNECTOR_TYPE_DVI 5
+#define GUD_CONNECTOR_TYPE_DISPLAYPORT 6
+#define GUD_CONNECTOR_TYPE_HDMI 7
+ __le32 flags;
+#define GUD_CONNECTOR_FLAGS_POLL_STATUS BIT(0)
+#define GUD_CONNECTOR_FLAGS_INTERLACE BIT(1)
+#define GUD_CONNECTOR_FLAGS_DOUBLESCAN BIT(2)
+} __packed;
+
+/*
+ * struct gud_set_buffer_req - Set buffer transfer info
+ * @x: X position of rectangle
+ * @y: Y position
+ * @width: Pixel width of rectangle
+ * @height: Pixel height
+ * @length: Buffer length in bytes
+ * @compression: Transfer compression
+ * @compressed_length: Compressed buffer length
+ *
+ * This request is issued right before the bulk transfer.
+ * @x, @y, @width and @height specifies the rectangle where the buffer should be
+ * placed inside the framebuffer.
+ */
+struct gud_set_buffer_req {
+ __le32 x;
+ __le32 y;
+ __le32 width;
+ __le32 height;
+ __le32 length;
+ __u8 compression;
+ __le32 compressed_length;
+} __packed;
+
+/*
+ * struct gud_state_req - Display state
+ * @mode: Display mode
+ * @format: Pixel format GUD_PIXEL_FORMAT_*
+ * @connector: Connector index
+ * @properties: Array of properties
+ *
+ * The entire state is transferred each time there's a change.
+ */
+struct gud_state_req {
+ struct gud_display_mode_req mode;
+ __u8 format;
+ __u8 connector;
+ struct gud_property_req properties[];
+} __packed;
+
+/* List of supported connector properties: */
+
+/* Margins in pixels to deal with overscan, range 0-100 */
+#define GUD_PROPERTY_TV_LEFT_MARGIN 1
+#define GUD_PROPERTY_TV_RIGHT_MARGIN 2
+#define GUD_PROPERTY_TV_TOP_MARGIN 3
+#define GUD_PROPERTY_TV_BOTTOM_MARGIN 4
+#define GUD_PROPERTY_TV_MODE 5
+/* Brightness in percent, range 0-100 */
+#define GUD_PROPERTY_TV_BRIGHTNESS 6
+/* Contrast in percent, range 0-100 */
+#define GUD_PROPERTY_TV_CONTRAST 7
+/* Flicker reduction in percent, range 0-100 */
+#define GUD_PROPERTY_TV_FLICKER_REDUCTION 8
+/* Overscan in percent, range 0-100 */
+#define GUD_PROPERTY_TV_OVERSCAN 9
+/* Saturation in percent, range 0-100 */
+#define GUD_PROPERTY_TV_SATURATION 10
+/* Hue in percent, range 0-100 */
+#define GUD_PROPERTY_TV_HUE 11
+
+/*
+ * Backlight brightness is in the range 0-100 inclusive. The value represents the human perceptual
+ * brightness and not a linear PWM value. 0 is minimum brightness which should not turn the
+ * backlight completely off. The DPMS connector property should be used to control power which will
+ * trigger a GUD_REQ_SET_DISPLAY_ENABLE request.
+ *
+ * This does not map to a DRM property, it is used with the backlight device.
+ */
+#define GUD_PROPERTY_BACKLIGHT_BRIGHTNESS 12
+
+/* List of supported properties that are not connector propeties: */
+
+/*
+ * Plane rotation. Should return the supported bitmask on
+ * GUD_REQ_GET_PROPERTIES. GUD_ROTATION_0 is mandatory.
+ *
+ * Note: This is not display rotation so 90/270 will need scaling to make it fit (unless squared).
+ */
+#define GUD_PROPERTY_ROTATION 50
+ #define GUD_ROTATION_0 BIT(0)
+ #define GUD_ROTATION_90 BIT(1)
+ #define GUD_ROTATION_180 BIT(2)
+ #define GUD_ROTATION_270 BIT(3)
+ #define GUD_ROTATION_REFLECT_X BIT(4)
+ #define GUD_ROTATION_REFLECT_Y BIT(5)
+ #define GUD_ROTATION_MASK (GUD_ROTATION_0 | GUD_ROTATION_90 | \
+ GUD_ROTATION_180 | GUD_ROTATION_270 | \
+ GUD_ROTATION_REFLECT_X | GUD_ROTATION_REFLECT_Y)
+
+/* USB Control requests: */
+
+/* Get status from the last GET/SET control request. Value is u8. */
+#define GUD_REQ_GET_STATUS 0x00
+ /* Status values: */
+ #define GUD_STATUS_OK 0x00
+ #define GUD_STATUS_BUSY 0x01
+ #define GUD_STATUS_REQUEST_NOT_SUPPORTED 0x02
+ #define GUD_STATUS_PROTOCOL_ERROR 0x03
+ #define GUD_STATUS_INVALID_PARAMETER 0x04
+ #define GUD_STATUS_ERROR 0x05
+
+/* Get display descriptor as a &gud_display_descriptor_req */
+#define GUD_REQ_GET_DESCRIPTOR 0x01
+
+/* Get supported pixel formats as a byte array of GUD_PIXEL_FORMAT_* */
+#define GUD_REQ_GET_FORMATS 0x40
+ #define GUD_FORMATS_MAX_NUM 32
+ /* R1 is a 1-bit monochrome transfer format presented to userspace as XRGB8888 */
+ #define GUD_PIXEL_FORMAT_R1 0x01
+ #define GUD_PIXEL_FORMAT_XRGB1111 0x20
+ #define GUD_PIXEL_FORMAT_RGB565 0x40
+ #define GUD_PIXEL_FORMAT_XRGB8888 0x80
+ #define GUD_PIXEL_FORMAT_ARGB8888 0x81
+
+/*
+ * Get supported properties that are not connector propeties as a &gud_property_req array.
+ * gud_property_req.val often contains the initial value for the property.
+ */
+#define GUD_REQ_GET_PROPERTIES 0x41
+ #define GUD_PROPERTIES_MAX_NUM 32
+
+/* Connector requests have the connector index passed in the wValue field */
+
+/* Get connector descriptors as an array of &gud_connector_descriptor_req */
+#define GUD_REQ_GET_CONNECTORS 0x50
+ #define GUD_CONNECTORS_MAX_NUM 32
+
+/*
+ * Get properties supported by the connector as a &gud_property_req array.
+ * gud_property_req.val often contains the initial value for the property.
+ */
+#define GUD_REQ_GET_CONNECTOR_PROPERTIES 0x51
+ #define GUD_CONNECTOR_PROPERTIES_MAX_NUM 32
+
+/*
+ * Issued when there's a TV_MODE property present.
+ * Gets an array of the supported TV_MODE names each entry of length
+ * GUD_CONNECTOR_TV_MODE_NAME_LEN. Names must be NUL-terminated.
+ */
+#define GUD_REQ_GET_CONNECTOR_TV_MODE_VALUES 0x52
+ #define GUD_CONNECTOR_TV_MODE_NAME_LEN 16
+ #define GUD_CONNECTOR_TV_MODE_MAX_NUM 16
+
+/* When userspace checks connector status, this is issued first, not used for poll requests. */
+#define GUD_REQ_SET_CONNECTOR_FORCE_DETECT 0x53
+
+/*
+ * Get connector status. Value is u8.
+ *
+ * Userspace will get a HOTPLUG uevent if one of the following is true:
+ * - Connection status has changed since last
+ * - CHANGED is set
+ */
+#define GUD_REQ_GET_CONNECTOR_STATUS 0x54
+ #define GUD_CONNECTOR_STATUS_DISCONNECTED 0x00
+ #define GUD_CONNECTOR_STATUS_CONNECTED 0x01
+ #define GUD_CONNECTOR_STATUS_UNKNOWN 0x02
+ #define GUD_CONNECTOR_STATUS_CONNECTED_MASK 0x03
+ #define GUD_CONNECTOR_STATUS_CHANGED BIT(7)
+
+/*
+ * Display modes can be fetched as either EDID data or an array of &gud_display_mode_req.
+ *
+ * If GUD_REQ_GET_CONNECTOR_MODES returns zero, EDID is used to create display modes.
+ * If both display modes and EDID are returned, EDID is just passed on to userspace
+ * in the EDID connector property.
+ */
+
+/* Get &gud_display_mode_req array of supported display modes */
+#define GUD_REQ_GET_CONNECTOR_MODES 0x55
+ #define GUD_CONNECTOR_MAX_NUM_MODES 128
+
+/* Get Extended Display Identification Data */
+#define GUD_REQ_GET_CONNECTOR_EDID 0x56
+ #define GUD_CONNECTOR_MAX_EDID_LEN 2048
+
+/* Set buffer properties before bulk transfer as &gud_set_buffer_req */
+#define GUD_REQ_SET_BUFFER 0x60
+
+/* Check display configuration as &gud_state_req */
+#define GUD_REQ_SET_STATE_CHECK 0x61
+
+/* Apply the previous STATE_CHECK configuration */
+#define GUD_REQ_SET_STATE_COMMIT 0x62
+
+/* Enable/disable the display controller, value is u8: 0/1 */
+#define GUD_REQ_SET_CONTROLLER_ENABLE 0x63
+
+/* Enable/disable display/output (DPMS), value is u8: 0/1 */
+#define GUD_REQ_SET_DISPLAY_ENABLE 0x64
+
+#endif
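
Editor's note: gud.h is a new protocol header for the Generic USB Display. A hedged sketch of filling a &gud_set_buffer_req for an uncompressed full-frame flush; the sketch_ wrapper and the use of cpu_to_le32() via <asm/byteorder.h> reflect common kernel practice, not something mandated by this header.

```c
#include <asm/byteorder.h>
#include <linux/string.h>

#include <drm/gud.h>

static void sketch_fill_set_buffer(struct gud_set_buffer_req *req,
				   u32 width, u32 height, u32 length)
{
	memset(req, 0, sizeof(*req));
	req->width = cpu_to_le32(width);
	req->height = cpu_to_le32(height);
	req->length = cpu_to_le32(length);
	/* x/y stay 0 and the compression fields stay unset for a full flush. */
}
```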
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 931e46191047..ebd0dd1c35b3 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -634,4 +634,15 @@
INTEL_VGA_DEVICE(0x4907, info), \
INTEL_VGA_DEVICE(0x4908, info)
+/* ADL-S */
+#define INTEL_ADLS_IDS(info) \
+ INTEL_VGA_DEVICE(0x4680, info), \
+ INTEL_VGA_DEVICE(0x4681, info), \
+ INTEL_VGA_DEVICE(0x4682, info), \
+ INTEL_VGA_DEVICE(0x4683, info), \
+ INTEL_VGA_DEVICE(0x4690, info), \
+ INTEL_VGA_DEVICE(0x4691, info), \
+ INTEL_VGA_DEVICE(0x4692, info), \
+ INTEL_VGA_DEVICE(0x4693, info)
+
#endif /* _I915_PCIIDS_H */
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index b8ca13664fa2..2155e2e38aec 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -44,9 +44,9 @@
#include "ttm_resource.h"
-struct ttm_bo_global;
+struct ttm_global;
-struct ttm_bo_device;
+struct ttm_device;
struct dma_buf_map;
@@ -88,7 +88,6 @@ struct ttm_tt;
* @type: The bo type.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
- * @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
* zero, the object is destroyed or put on the delayed delete list.
* @mem: structure describing current placement.
@@ -122,10 +121,9 @@ struct ttm_buffer_object {
* Members constant at init.
*/
- struct ttm_bo_device *bdev;
+ struct ttm_device *bdev;
enum ttm_bo_type type;
void (*destroy) (struct ttm_buffer_object *);
- size_t acc_size;
/**
* Members not needing protection.
@@ -146,7 +144,6 @@ struct ttm_buffer_object {
struct list_head lru;
struct list_head ddestroy;
- struct list_head swap;
/**
* Members protected by a bo reservation.
@@ -313,7 +310,7 @@ void ttm_bo_put(struct ttm_buffer_object *bo);
* @bulk: optional bulk move structure to remember BO positions
*
* Move this BO to the tail of all lru lists used to lookup and reserve an
- * object. This function must be called with struct ttm_bo_global::lru_lock
+ * object. This function must be called with struct ttm_global::lru_lock
* held, and is used to make a BO less likely to be considered for eviction.
*/
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
@@ -326,7 +323,7 @@ void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo,
* @bulk: bulk move structure
*
* Bulk move BOs to the LRU tail, only valid to use when driver makes sure that
- * BO order never changes. Should be called with ttm_bo_global::lru_lock held.
+ * BO order never changes. Should be called with ttm_global::lru_lock held.
*/
void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
@@ -337,14 +334,14 @@ void ttm_bo_bulk_move_lru_tail(struct ttm_lru_bulk_move *bulk);
* Returns
* True if the workqueue was queued at the time
*/
-int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev);
+int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
/**
* ttm_bo_unlock_delayed_workqueue
*
* Allows the delayed workqueue to run.
*/
-void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
+void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
/**
* ttm_bo_eviction_valuable
@@ -357,21 +354,16 @@ void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
-size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
- unsigned long bo_size,
- unsigned struct_size);
-
/**
* ttm_bo_init_reserved
*
- * @bdev: Pointer to a ttm_bo_device struct.
+ * @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
* @flags: Initial placement flags.
* @page_alignment: Data alignment in pages.
* @ctx: TTM operation context for memory allocation.
- * @acc_size: Accounted size for this object.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -396,20 +388,19 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
-int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
+int ttm_bo_init_reserved(struct ttm_device *bdev,
struct ttm_buffer_object *bo,
size_t size, enum ttm_bo_type type,
struct ttm_placement *placement,
uint32_t page_alignment,
struct ttm_operation_ctx *ctx,
- size_t acc_size, struct sg_table *sg,
- struct dma_resv *resv,
+ struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
/**
* ttm_bo_init
*
- * @bdev: Pointer to a ttm_bo_device struct.
+ * @bdev: Pointer to a ttm_device struct.
* @bo: Pointer to a ttm_buffer_object to be initialized.
* @size: Requested size of buffer object.
* @type: Requested type of buffer object.
@@ -421,7 +412,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* holds a pointer to a persistent shmem object. Typically, this would
* point to the shmem object backing a GEM object if TTM is used to back a
* GEM user interface.
- * @acc_size: Accounted size for this object.
* @resv: Pointer to a dma_resv, or NULL to let ttm allocate one.
* @destroy: Destroy function. Use NULL for kfree().
*
@@ -443,10 +433,10 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
* -EINVAL: Invalid placement flags.
* -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
*/
-int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo,
+int ttm_bo_init(struct ttm_device *bdev, struct ttm_buffer_object *bo,
size_t size, enum ttm_bo_type type,
struct ttm_placement *placement,
- uint32_t page_alignment, bool interrubtible, size_t acc_size,
+ uint32_t page_alignment, bool interrubtible,
struct sg_table *sg, struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *));
@@ -537,18 +527,18 @@ int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
*
* @filp: filp as input from the mmap method.
* @vma: vma as input from the mmap method.
- * @bdev: Pointer to the ttm_bo_device with the address space manager.
+ * @bdev: Pointer to the ttm_device with the address space manager.
*
* This function is intended to be called by the device mmap method.
* if the device address space is to be backed by the bo manager.
*/
int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
- struct ttm_bo_device *bdev);
+ struct ttm_device *bdev);
/**
* ttm_bo_io
*
- * @bdev: Pointer to the struct ttm_bo_device.
+ * @bdev: Pointer to the struct ttm_device.
* @filp: Pointer to the struct file attempting to read / write.
* @wbuf: User-space pointer to address of buffer to write. NULL on read.
* @rbuf: User-space pointer to address of buffer to read into.
@@ -565,11 +555,12 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
* the function may return -ERESTARTSYS if
* interrupted by a signal.
*/
-ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
+ssize_t ttm_bo_io(struct ttm_device *bdev, struct file *filp,
const char __user *wbuf, char __user *rbuf,
size_t count, loff_t *f_pos, bool write);
-int ttm_bo_swapout(struct ttm_operation_ctx *ctx);
+int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
+ gfp_t gfp_flags);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the
@@ -619,7 +610,7 @@ static inline void ttm_bo_unpin(struct ttm_buffer_object *bo)
WARN_ON_ONCE(true);
}
-int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+int ttm_mem_evict_first(struct ttm_device *bdev,
struct ttm_resource_manager *man,
const struct ttm_place *place,
struct ttm_operation_ctx *ctx,
@@ -644,5 +635,6 @@ void ttm_bo_vm_close(struct vm_area_struct *vma);
int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
void *buf, int len, int write);
+bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all);
#endif
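
Editor's note: ttm_bo_swapout() now operates on a single BO and takes explicit gfp flags. A hedged call sketch showing only the new argument list; the locking and reservation requirements of a real caller are omitted, and the sketch_ wrapper is an assumption.

```c
#include <linux/gfp.h>

#include <drm/ttm/ttm_bo_api.h>

static int sketch_swap_out_one(struct ttm_buffer_object *bo)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};

	/* Per-BO swapout with explicit gfp flags replaces the old global call. */
	return ttm_bo_swapout(bo, &ctx, GFP_KERNEL);
}
```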
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 423348414c59..dbccac957f8f 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -37,302 +37,14 @@
#include <linux/spinlock.h>
#include <linux/dma-resv.h>
+#include <drm/ttm/ttm_device.h>
+
#include "ttm_bo_api.h"
-#include "ttm_memory.h"
#include "ttm_placement.h"
#include "ttm_tt.h"
#include "ttm_pool.h"
/**
- * struct ttm_bo_driver
- *
- * @create_ttm_backend_entry: Callback to create a struct ttm_backend.
- * @evict_flags: Callback to obtain placement flags when a buffer is evicted.
- * @move: Callback for a driver to hook in accelerated functions to
- * move a buffer.
- * If set to NULL, a potentially slow memcpy() move is used.
- */
-
-struct ttm_bo_driver {
- /**
- * ttm_tt_create
- *
- * @bo: The buffer object to create the ttm for.
- * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
- *
- * Create a struct ttm_tt to back data with system memory pages.
- * No pages are actually allocated.
- * Returns:
- * NULL: Out of memory.
- */
- struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
- uint32_t page_flags);
-
- /**
- * ttm_tt_populate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Allocate all backing pages
- * Returns:
- * -ENOMEM: Out of memory.
- */
- int (*ttm_tt_populate)(struct ttm_bo_device *bdev,
- struct ttm_tt *ttm,
- struct ttm_operation_ctx *ctx);
-
- /**
- * ttm_tt_unpopulate
- *
- * @ttm: The struct ttm_tt to contain the backing pages.
- *
- * Free all backing page
- */
- void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-
- /**
- * ttm_tt_destroy
- *
- * @bdev: Pointer to a ttm device
- * @ttm: Pointer to a struct ttm_tt.
- *
- * Destroy the backend. This will be call back from ttm_tt_destroy so
- * don't call ttm_tt_destroy from the callback or infinite loop.
- */
- void (*ttm_tt_destroy)(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
-
- /**
- * struct ttm_bo_driver member eviction_valuable
- *
- * @bo: the buffer object to be evicted
- * @place: placement we need room for
- *
- * Check with the driver if it is valuable to evict a BO to make room
- * for a certain placement.
- */
- bool (*eviction_valuable)(struct ttm_buffer_object *bo,
- const struct ttm_place *place);
- /**
- * struct ttm_bo_driver member evict_flags:
- *
- * @bo: the buffer object to be evicted
- *
- * Return the bo flags for a buffer which is not mapped to the hardware.
- * These will be placed in proposed_flags so that when the move is
- * finished, they'll end up in bo->mem.flags
- * This should not cause multihop evictions, and the core will warn
- * if one is proposed.
- */
-
- void (*evict_flags)(struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
-
- /**
- * struct ttm_bo_driver member move:
- *
- * @bo: the buffer to move
- * @evict: whether this motion is evicting the buffer from
- * the graphics address space
- * @ctx: context for this move with parameters
- * @new_mem: the new memory region receiving the buffer
- @ @hop: placement for driver directed intermediate hop
- *
- * Move a buffer between two memory regions.
- * Returns errno -EMULTIHOP if driver requests a hop
- */
- int (*move)(struct ttm_buffer_object *bo, bool evict,
- struct ttm_operation_ctx *ctx,
- struct ttm_resource *new_mem,
- struct ttm_place *hop);
-
- /**
- * struct ttm_bo_driver_member verify_access
- *
- * @bo: Pointer to a buffer object.
- * @filp: Pointer to a struct file trying to access the object.
- *
- * Called from the map / write / read methods to verify that the
- * caller is permitted to access the buffer object.
- * This member may be set to NULL, which will refuse this kind of
- * access for all buffer objects.
- * This function should return 0 if access is granted, -EPERM otherwise.
- */
- int (*verify_access)(struct ttm_buffer_object *bo,
- struct file *filp);
-
- /**
- * Hook to notify driver about a resource delete.
- */
- void (*delete_mem_notify)(struct ttm_buffer_object *bo);
-
- /**
- * notify the driver that we're about to swap out this bo
- */
- void (*swap_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Driver callback on when mapping io memory (for bo_move_memcpy
- * for instance). TTM will take care to call io_mem_free whenever
- * the mapping is not use anymore. io_mem_reserve & io_mem_free
- * are balanced.
- */
- int (*io_mem_reserve)(struct ttm_bo_device *bdev,
- struct ttm_resource *mem);
- void (*io_mem_free)(struct ttm_bo_device *bdev,
- struct ttm_resource *mem);
-
- /**
- * Return the pfn for a given page_offset inside the BO.
- *
- * @bo: the BO to look up the pfn for
- * @page_offset: the offset to look up
- */
- unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
- unsigned long page_offset);
-
- /**
- * Read/write memory buffers for ptrace access
- *
- * @bo: the BO to access
- * @offset: the offset from the start of the BO
- * @buf: pointer to source/destination buffer
- * @len: number of bytes to copy
- * @write: whether to read (0) from or write (non-0) to BO
- *
- * If successful, this function should return the number of
- * bytes copied, -EIO otherwise. If the number of bytes
- * returned is < len, the function may be called again with
- * the remainder of the buffer to copy.
- */
- int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
- void *buf, int len, int write);
-
- /**
- * struct ttm_bo_driver member del_from_lru_notify
- *
- * @bo: the buffer object deleted from lru
- *
- * notify driver that a BO was deleted from LRU.
- */
- void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
-
- /**
- * Notify the driver that we're about to release a BO
- *
- * @bo: BO that is about to be released
- *
- * Gives the driver a chance to do any cleanup, including
- * adding fences that may force a delayed delete
- */
- void (*release_notify)(struct ttm_buffer_object *bo);
-};
-
-/**
- * struct ttm_bo_global - Buffer object driver global data.
- *
- * @dummy_read_page: Pointer to a dummy page used for mapping requests
- * of unpopulated pages.
- * @shrink: A shrink callback object used for buffer object swap.
- * @device_list_mutex: Mutex protecting the device list.
- * This mutex is held while traversing the device list for pm options.
- * @lru_lock: Spinlock protecting the bo subsystem lru lists.
- * @device_list: List of buffer object devices.
- * @swap_lru: Lru list of buffer objects used for swapping.
- */
-
-extern struct ttm_bo_global {
-
- /**
- * Constant after init.
- */
-
- struct kobject kobj;
- struct page *dummy_read_page;
- spinlock_t lru_lock;
-
- /**
- * Protected by ttm_global_mutex.
- */
- struct list_head device_list;
-
- /**
- * Protected by the lru_lock.
- */
- struct list_head swap_lru[TTM_MAX_BO_PRIORITY];
-
- /**
- * Internal protection.
- */
- atomic_t bo_count;
-} ttm_bo_glob;
-
-
-#define TTM_NUM_MEM_TYPES 8
-
-/**
- * struct ttm_bo_device - Buffer object driver device-specific data.
- *
- * @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
- * @man: An array of resource_managers.
- * @vma_manager: Address space manager (pointer)
- * lru_lock: Spinlock that protects the buffer+device lru lists and
- * ddestroy lists.
- * @dev_mapping: A pointer to the struct address_space representing the
- * device address space.
- * @wq: Work queue structure for the delayed delete workqueue.
- *
- */
-
-struct ttm_bo_device {
-
- /*
- * Constant after bo device init / atomic.
- */
- struct list_head device_list;
- struct ttm_bo_driver *driver;
- /*
- * access via ttm_manager_type.
- */
- struct ttm_resource_manager sysman;
- struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
- /*
- * Protected by internal locks.
- */
- struct drm_vma_offset_manager *vma_manager;
- struct ttm_pool pool;
-
- /*
- * Protected by the global:lru lock.
- */
- struct list_head ddestroy;
-
- /*
- * Protected by load / firstopen / lastclose /unload sync.
- */
-
- struct address_space *dev_mapping;
-
- /*
- * Internal protection.
- */
-
- struct delayed_work wq;
-};
-
-static inline struct ttm_resource_manager *ttm_manager_type(struct ttm_bo_device *bdev,
- int mem_type)
-{
- return bdev->man_drv[mem_type];
-}
-
-static inline void ttm_set_driver_manager(struct ttm_bo_device *bdev,
- int type,
- struct ttm_resource_manager *manager)
-{
- bdev->man_drv[type] = manager;
-}
-
-/**
* struct ttm_lru_bulk_move_pos
*
* @first: first BO in the bulk move range
@@ -357,7 +69,6 @@ struct ttm_lru_bulk_move_pos {
struct ttm_lru_bulk_move {
struct ttm_lru_bulk_move_pos tt[TTM_MAX_BO_PRIORITY];
struct ttm_lru_bulk_move_pos vram[TTM_MAX_BO_PRIORITY];
- struct ttm_lru_bulk_move_pos swap[TTM_MAX_BO_PRIORITY];
};
/*
@@ -388,31 +99,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
struct ttm_operation_ctx *ctx);
-int ttm_bo_device_release(struct ttm_bo_device *bdev);
-
-/**
- * ttm_bo_device_init
- *
- * @bdev: A pointer to a struct ttm_bo_device to initialize.
- * @glob: A pointer to an initialized struct ttm_bo_global.
- * @driver: A pointer to a struct ttm_bo_driver set up by the caller.
- * @dev: The core kernel device pointer for DMA mappings and allocations.
- * @mapping: The address space to use for this bo.
- * @vma_manager: A pointer to a vma manager.
- * @use_dma_alloc: If coherent DMA allocation API should be used.
- * @use_dma32: If we should use GFP_DMA32 for device memory allocations.
- *
- * Initializes a struct ttm_bo_device:
- * Returns:
- * !0: Failure.
- */
-int ttm_bo_device_init(struct ttm_bo_device *bdev,
- struct ttm_bo_driver *driver,
- struct device *dev,
- struct address_space *mapping,
- struct drm_vma_offset_manager *vma_manager,
- bool use_dma_alloc, bool use_dma32);
-
/**
* ttm_bo_unmap_virtual
*
@@ -494,9 +180,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
- spin_lock(&ttm_bo_glob.lru_lock);
+ spin_lock(&bo->bdev->lru_lock);
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
- spin_unlock(&ttm_bo_glob.lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
}
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
@@ -538,9 +224,9 @@ static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
/*
* ttm_bo_util.c
*/
-int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+int ttm_mem_io_reserve(struct ttm_device *bdev,
struct ttm_resource *mem);
-void ttm_mem_io_free(struct ttm_bo_device *bdev,
+void ttm_mem_io_free(struct ttm_device *bdev,
struct ttm_resource *mem);
/**
@@ -631,7 +317,7 @@ void ttm_bo_tt_destroy(struct ttm_buffer_object *bo);
* Initialise a generic range manager for the selected memory type.
* The range manager is installed for this device in the type slot.
*/
-int ttm_range_man_init(struct ttm_bo_device *bdev,
+int ttm_range_man_init(struct ttm_device *bdev,
unsigned type, bool use_tt,
unsigned long p_size);
@@ -643,7 +329,7 @@ int ttm_range_man_init(struct ttm_bo_device *bdev,
*
* Remove the generic range manager from a slot and tear it down.
*/
-int ttm_range_man_fini(struct ttm_bo_device *bdev,
+int ttm_range_man_fini(struct ttm_device *bdev,
unsigned type);
#endif
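
The helpers that remain in ttm_bo_driver.h now take the new struct ttm_device, and the LRU/ddestroy lists are protected by the per-device lru_lock rather than the old ttm_bo_glob lock. Below is a minimal sketch of VRAM range-manager setup and teardown against the reworked signatures; my_drv_vram_init() and my_drv_vram_fini() are hypothetical driver helpers used only for illustration.

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical helpers; TTM_PL_VRAM comes from ttm_placement.h. */
static int my_drv_vram_init(struct ttm_device *bdev, unsigned long vram_pages)
{
	/* The range-manager helpers now take the new struct ttm_device. */
	return ttm_range_man_init(bdev, TTM_PL_VRAM,
				  false /* use_tt */, vram_pages);
}

static void my_drv_vram_fini(struct ttm_device *bdev)
{
	ttm_range_man_fini(bdev, TTM_PL_VRAM);
}
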
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
new file mode 100644
index 000000000000..7c8f87bd52d3
--- /dev/null
+++ b/include/drm/ttm/ttm_device.h
@@ -0,0 +1,317 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Christian König
+ */
+
+#ifndef _TTM_DEVICE_H_
+#define _TTM_DEVICE_H_
+
+#include <linux/types.h>
+#include <linux/workqueue.h>
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_pool.h>
+
+#define TTM_NUM_MEM_TYPES 8
+
+struct ttm_device;
+struct ttm_placement;
+struct ttm_buffer_object;
+struct ttm_operation_ctx;
+
+/**
+ * struct ttm_global - Buffer object driver global data.
+ *
+ * @dummy_read_page: Pointer to a dummy page used for mapping requests
+ * of unpopulated pages.
+ * @device_list: List of buffer object devices, protected by ttm_global_mutex.
+ * @bo_count: Number of buffer objects allocated by all devices.
+ */
+extern struct ttm_global {
+
+ /**
+ * Constant after init.
+ */
+
+ struct page *dummy_read_page;
+
+ /**
+ * Protected by ttm_global_mutex.
+ */
+ struct list_head device_list;
+
+ /**
+ * Internal protection.
+ */
+ atomic_t bo_count;
+} ttm_glob;
+
+struct ttm_device_funcs {
+ /**
+ * ttm_tt_create
+ *
+ * @bo: The buffer object to create the ttm for.
+ * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags.
+ *
+ * Create a struct ttm_tt to back data with system memory pages.
+ * No pages are actually allocated.
+ * Returns:
+ * NULL: Out of memory.
+ */
+ struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
+ uint32_t page_flags);
+
+ /**
+ * ttm_tt_populate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Allocate all backing pages
+ * Returns:
+ * -ENOMEM: Out of memory.
+ */
+ int (*ttm_tt_populate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm,
+ struct ttm_operation_ctx *ctx);
+
+ /**
+ * ttm_tt_unpopulate
+ *
+ * @ttm: The struct ttm_tt to contain the backing pages.
+ *
+ * Free all backing pages.
+ */
+ void (*ttm_tt_unpopulate)(struct ttm_device *bdev,
+ struct ttm_tt *ttm);
+
+ /**
+ * ttm_tt_destroy
+ *
+ * @bdev: Pointer to a ttm device
+ * @ttm: Pointer to a struct ttm_tt.
+ *
+ * Destroy the backend. This will be called back from ttm_tt_destroy, so
+ * don't call ttm_tt_destroy from the callback or you create an infinite loop.
+ */
+ void (*ttm_tt_destroy)(struct ttm_device *bdev, struct ttm_tt *ttm);
+
+ /**
+ * struct ttm_device_funcs member eviction_valuable
+ *
+ * @bo: the buffer object to be evicted
+ * @place: placement we need room for
+ *
+ * Check with the driver if it is valuable to evict a BO to make room
+ * for a certain placement.
+ */
+ bool (*eviction_valuable)(struct ttm_buffer_object *bo,
+ const struct ttm_place *place);
+ /**
+ * struct ttm_device_funcs member evict_flags:
+ *
+ * @bo: the buffer object to be evicted
+ *
+ * Return the bo flags for a buffer which is not mapped to the hardware.
+ * These will be placed in proposed_flags so that when the move is
+ * finished, they'll end up in bo->mem.flags
+ * This should not cause multihop evictions, and the core will warn
+ * if one is proposed.
+ */
+
+ void (*evict_flags)(struct ttm_buffer_object *bo,
+ struct ttm_placement *placement);
+
+ /**
+ * struct ttm_device_funcs member move:
+ *
+ * @bo: the buffer to move
+ * @evict: whether this motion is evicting the buffer from
+ * the graphics address space
+ * @ctx: context for this move with parameters
+ * @new_mem: the new memory region receiving the buffer
+ * @hop: placement for driver directed intermediate hop
+ *
+ * Move a buffer between two memory regions.
+ * Returns errno -EMULTIHOP if the driver requests a hop.
+ */
+ int (*move)(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_resource *new_mem,
+ struct ttm_place *hop);
+
+ /**
+ * struct ttm_device_funcs member verify_access
+ *
+ * @bo: Pointer to a buffer object.
+ * @filp: Pointer to a struct file trying to access the object.
+ *
+ * Called from the map / write / read methods to verify that the
+ * caller is permitted to access the buffer object.
+ * This member may be set to NULL, which will refuse this kind of
+ * access for all buffer objects.
+ * This function should return 0 if access is granted, -EPERM otherwise.
+ */
+ int (*verify_access)(struct ttm_buffer_object *bo,
+ struct file *filp);
+
+ /**
+ * Hook to notify driver about a resource delete.
+ */
+ void (*delete_mem_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Notify the driver that we're about to swap out this BO.
+ */
+ void (*swap_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Driver callback when mapping IO memory (for bo_move_memcpy
+ * for instance). TTM will take care to call io_mem_free whenever
+ * the mapping is no longer in use. io_mem_reserve & io_mem_free
+ * are balanced.
+ */
+ int (*io_mem_reserve)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+ void (*io_mem_free)(struct ttm_device *bdev,
+ struct ttm_resource *mem);
+
+ /**
+ * Return the pfn for a given page_offset inside the BO.
+ *
+ * @bo: the BO to look up the pfn for
+ * @page_offset: the offset to look up
+ */
+ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
+
+ /**
+ * Read/write memory buffers for ptrace access
+ *
+ * @bo: the BO to access
+ * @offset: the offset from the start of the BO
+ * @buf: pointer to source/destination buffer
+ * @len: number of bytes to copy
+ * @write: whether to read (0) from or write (non-0) to BO
+ *
+ * If successful, this function should return the number of
+ * bytes copied, -EIO otherwise. If the number of bytes
+ * returned is < len, the function may be called again with
+ * the remainder of the buffer to copy.
+ */
+ int (*access_memory)(struct ttm_buffer_object *bo, unsigned long offset,
+ void *buf, int len, int write);
+
+ /**
+ * struct ttm_device_funcs member del_from_lru_notify
+ *
+ * @bo: the buffer object deleted from lru
+ *
+ * Notify the driver that a BO was deleted from the LRU.
+ */
+ void (*del_from_lru_notify)(struct ttm_buffer_object *bo);
+
+ /**
+ * Notify the driver that we're about to release a BO
+ *
+ * @bo: BO that is about to be released
+ *
+ * Gives the driver a chance to do any cleanup, including
+ * adding fences that may force a delayed delete
+ */
+ void (*release_notify)(struct ttm_buffer_object *bo);
+};
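
For illustration, a hedged sketch of how a driver might fill in this table; the my_*() callbacks are hypothetical functions matching the member signatures above, and hooks such as verify_access or the notify callbacks can be left NULL when they are not needed.

#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>

static struct ttm_tt *my_ttm_tt_create(struct ttm_buffer_object *bo,
				       uint32_t page_flags);
static void my_evict_flags(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement);
static int my_bo_move(struct ttm_buffer_object *bo, bool evict,
		      struct ttm_operation_ctx *ctx,
		      struct ttm_resource *new_mem,
		      struct ttm_place *hop);

/* Hypothetical driver function table for the renamed device object. */
static struct ttm_device_funcs my_ttm_funcs = {
	.ttm_tt_create = my_ttm_tt_create,
	.evict_flags   = my_evict_flags,
	.move          = my_bo_move,
};
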
+
+/**
+ * struct ttm_device - Buffer object driver device-specific data.
+ *
+ * @device_list: Our entry in the global device list.
+ * @funcs: Function table for the device.
+ * @sysman: Resource manager for the system domain.
+ * @man_drv: An array of resource managers, one per memory type.
+ * @vma_manager: Address space manager.
+ * @pool: Page pool for the device.
+ * @lru_lock: Protection for the per manager LRU and ddestroy lists.
+ * @ddestroy: Destroyed buffer objects waiting for their final cleanup.
+ * @dev_mapping: A pointer to the struct address_space representing the
+ * device address space.
+ * @wq: Work queue structure for the delayed delete workqueue.
+ */
+struct ttm_device {
+ /*
+ * Constant after bo device init
+ */
+ struct list_head device_list;
+ struct ttm_device_funcs *funcs;
+
+ /*
+ * Access via ttm_manager_type.
+ */
+ struct ttm_resource_manager sysman;
+ struct ttm_resource_manager *man_drv[TTM_NUM_MEM_TYPES];
+
+ /*
+ * Protected by internal locks.
+ */
+ struct drm_vma_offset_manager *vma_manager;
+ struct ttm_pool pool;
+
+ /*
+ * Protection for the per manager LRU and ddestroy lists.
+ */
+ spinlock_t lru_lock;
+ struct list_head ddestroy;
+
+ /*
+ * Protected by load / firstopen / lastclose / unload sync.
+ */
+ struct address_space *dev_mapping;
+
+ /*
+ * Internal protection.
+ */
+ struct delayed_work wq;
+};
+
+int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
+int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
+ gfp_t gfp_flags);
+
+static inline struct ttm_resource_manager *
+ttm_manager_type(struct ttm_device *bdev, int mem_type)
+{
+ return bdev->man_drv[mem_type];
+}
+
+static inline void ttm_set_driver_manager(struct ttm_device *bdev, int type,
+ struct ttm_resource_manager *manager)
+{
+ bdev->man_drv[type] = manager;
+}
+
+int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
+ struct device *dev, struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
+ bool use_dma_alloc, bool use_dma32);
+void ttm_device_fini(struct ttm_device *bdev);
+
+#endif
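
Device bring-up then amounts to embedding struct ttm_device in the driver structure and calling ttm_device_init()/ttm_device_fini(). A hedged sketch, reusing the my_ttm_funcs table from the previous example and a hypothetical struct my_dev that also embeds the DRM device:

#include <drm/drm_device.h>
#include <drm/ttm/ttm_device.h>

struct my_dev {
	struct drm_device drm;
	struct ttm_device bdev;
};

static int my_drv_ttm_init(struct my_dev *mdev)
{
	/* mapping and vma_manager come from the DRM core objects. */
	return ttm_device_init(&mdev->bdev, &my_ttm_funcs, mdev->drm.dev,
			       mdev->drm.anon_inode->i_mapping,
			       mdev->drm.vma_offset_manager,
			       true /* use_dma_alloc */,
			       false /* use_dma32 */);
}

static void my_drv_ttm_fini(struct my_dev *mdev)
{
	ttm_device_fini(&mdev->bdev);
}
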
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
deleted file mode 100644
index c1f167881e33..000000000000
--- a/include/drm/ttm/ttm_memory.h
+++ /dev/null
@@ -1,95 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
-#ifndef TTM_MEMORY_H
-#define TTM_MEMORY_H
-
-#include <linux/workqueue.h>
-#include <linux/spinlock.h>
-#include <linux/bug.h>
-#include <linux/wait.h>
-#include <linux/errno.h>
-#include <linux/kobject.h>
-#include <linux/mm.h>
-#include "ttm_bo_api.h"
-
-/**
- * struct ttm_mem_global - Global memory accounting structure.
- *
- * @shrink: A single callback to shrink TTM memory usage. Extend this
- * to a linked list to be able to handle multiple callbacks when needed.
- * @swap_queue: A workqueue to handle shrinking in low memory situations. We
- * need a separate workqueue since it will spend a lot of time waiting
- * for the GPU, and this will otherwise block other workqueue tasks(?)
- * At this point we use only a single-threaded workqueue.
- * @work: The workqueue callback for the shrink queue.
- * @lock: Lock to protect the @shrink - and the memory accounting members,
- * that is, essentially the whole structure with some exceptions.
- * @lower_mem_limit: include lower limit of swap space and lower limit of
- * system memory.
- * @zones: Array of pointers to accounting zones.
- * @num_zones: Number of populated entries in the @zones array.
- * @zone_kernel: Pointer to the kernel zone.
- * @zone_highmem: Pointer to the highmem zone if there is one.
- * @zone_dma32: Pointer to the dma32 zone if there is one.
- *
- * Note that this structure is not per device. It should be global for all
- * graphics devices.
- */
-
-#define TTM_MEM_MAX_ZONES 2
-struct ttm_mem_zone;
-extern struct ttm_mem_global {
- struct kobject kobj;
- struct workqueue_struct *swap_queue;
- struct work_struct work;
- spinlock_t lock;
- uint64_t lower_mem_limit;
- struct ttm_mem_zone *zones[TTM_MEM_MAX_ZONES];
- unsigned int num_zones;
- struct ttm_mem_zone *zone_kernel;
-#ifdef CONFIG_HIGHMEM
- struct ttm_mem_zone *zone_highmem;
-#else
- struct ttm_mem_zone *zone_dma32;
-#endif
-} ttm_mem_glob;
-
-int ttm_mem_global_init(struct ttm_mem_global *glob);
-void ttm_mem_global_release(struct ttm_mem_global *glob);
-int ttm_mem_global_alloc(struct ttm_mem_global *glob, uint64_t memory,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free(struct ttm_mem_global *glob, uint64_t amount);
-int ttm_mem_global_alloc_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size,
- struct ttm_operation_ctx *ctx);
-void ttm_mem_global_free_page(struct ttm_mem_global *glob,
- struct page *page, uint64_t size);
-size_t ttm_round_pot(size_t size);
-bool ttm_check_under_lowerlimit(struct ttm_mem_global *glob, uint64_t num_pages,
- struct ttm_operation_ctx *ctx);
-#endif
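
With the global zone accounting gone, memory pressure is handled through the swapout entry points declared in ttm_device.h. A hedged sketch of a driver-side pressure handler, reusing the hypothetical struct my_dev from the previous example; the return-value semantics noted in the comment are an assumption, not something this header guarantees.

static void my_drv_handle_mem_pressure(struct my_dev *mdev)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};

	/* Try this device's LRUs first, then fall back to all devices.
	 * Both calls are assumed to return the number of pages freed
	 * (<= 0 meaning nothing could be swapped out). */
	if (ttm_device_swapout(&mdev->bdev, &ctx, GFP_KERNEL) <= 0)
		ttm_global_swapout(&ctx, GFP_KERNEL);
}
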
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index da0ed7e8c915..6164ccf4f308 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -33,7 +33,7 @@
#define TTM_MAX_BO_PRIORITY 4U
-struct ttm_bo_device;
+struct ttm_device;
struct ttm_resource_manager;
struct ttm_resource;
struct ttm_place;
@@ -233,7 +233,7 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res);
void ttm_resource_manager_init(struct ttm_resource_manager *man,
unsigned long p_size);
-int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev,
+int ttm_resource_manager_evict_all(struct ttm_device *bdev,
struct ttm_resource_manager *man);
void ttm_resource_manager_debug(struct ttm_resource_manager *man,
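
ttm_resource_manager_evict_all() follows the same ttm_bo_device to ttm_device rename. A short hedged sketch of a manager teardown path, again using the hypothetical my_dev:

static void my_drv_vram_evict_all(struct my_dev *mdev)
{
	struct ttm_resource_manager *man =
		ttm_manager_type(&mdev->bdev, TTM_PL_VRAM);

	/* Stop new allocations, then empty the manager. */
	ttm_resource_manager_set_used(man, false);
	ttm_resource_manager_evict_all(&mdev->bdev, man);
}
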
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h
index 6c8eb9a4de81..134d09ef7766 100644
--- a/include/drm/ttm/ttm_tt.h
+++ b/include/drm/ttm/ttm_tt.h
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <drm/ttm/ttm_caching.h>
+struct ttm_device;
struct ttm_tt;
struct ttm_resource;
struct ttm_buffer_object;
@@ -118,14 +119,14 @@ void ttm_tt_fini(struct ttm_tt *ttm);
*
* Unbind, unpopulate and destroy common struct ttm_tt.
*/
-void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void ttm_tt_destroy(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_destroy_common:
*
* Called from driver to destroy common path.
*/
-void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void ttm_tt_destroy_common(struct ttm_device *bdev, struct ttm_tt *ttm);
/**
* ttm_tt_swapin:
@@ -135,7 +136,8 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
* Swap in a previously swap out ttm_tt.
*/
int ttm_tt_swapin(struct ttm_tt *ttm);
-int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+int ttm_tt_swapout(struct ttm_device *bdev, struct ttm_tt *ttm,
+ gfp_t gfp_flags);
/**
* ttm_tt_populate - allocate pages for a ttm
@@ -144,7 +146,7 @@ int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
*
* Calls the driver method to allocate pages for a ttm
*/
-int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
+int ttm_tt_populate(struct ttm_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx);
/**
* ttm_tt_unpopulate - free pages from a ttm
@@ -153,7 +155,9 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_o
*
* Calls the driver method to free all pages from a ttm
*/
-void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm);
+void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm);
+
+void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages);
#if IS_ENABLED(CONFIG_AGP)
#include <linux/agp_backend.h>