From 30c637151cfac8da3588f3773462e705a4ff2f59 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 20 Jul 2022 10:30:56 +0200 Subject: drm/plane-helper: Export individual helpers Export the individual plane helpers that make up the plane functions and align the naming with other helpers. The plane helpers are for non-atomic modesetting and exporting them will simplify a later conversion of drivers to atomic modesetting. With struct drm_plane_funcs removed from drm_plane_helper.h, also remove the include statements. It only needs linux/types.h for uint32_t and a number of forward declarations. Signed-off-by: Thomas Zimmermann Reviewed-by: Sam Ravnborg Link: https://patchwork.freedesktop.org/patch/msgid/20220720083058.15371-6-tzimmermann@suse.de --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 41e4774abdb0..e2d7fd78c57b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -85,6 +85,7 @@ #include #include #include +#include #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" @@ -7717,7 +7718,7 @@ static void dm_drm_plane_destroy_state(struct drm_plane *plane, static const struct drm_plane_funcs dm_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, .disable_plane = drm_atomic_helper_disable_plane, - .destroy = drm_primary_helper_destroy, + .destroy = drm_plane_helper_destroy, .reset = dm_drm_plane_reset, .atomic_duplicate_state = dm_drm_plane_duplicate_state, .atomic_destroy_state = dm_drm_plane_destroy_state, -- cgit v1.2.3-59-g8ed1b From acc96ae0d12783e9781428b17e34fd662a904f0a Mon Sep 17 00:00:00 2001 From: Melissa Wen Date: Thu, 4 Aug 2022 15:13:49 -0100 Subject: drm/amd/display: set panel orientation before drm_dev_register To set the panel orientation property with quirk, we need the mode size provided by EDID. This info is available after EDID is read by dc_link_detect() and updated by amdgpu_dm_update_connector_after_detect(). 
The detection happens at driver load in amdgpu_dm_initialize_drm_device() and, therefore, we can get modes and set panel orientation before drm_dev_register() to avoid DRM warns on creating the connector property after device registration: [ 2.563969] ------------[ cut here ]------------ [ 2.563971] WARNING: CPU: 6 PID: 325 at drivers/gpu/drm/drm_mode_object.c:45 drm_mode_object_add+0x72/0x80 [drm] [ 2.563997] Modules linked in: btusb btrtl btbcm btintel btmtk bluetooth rfkill ecdh_generic ecc usbhid crc16 amdgpu(+) drm_ttm_helper ttm agpgart gpu_sched i2c_algo_bit drm_display_helper drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops drm serio_raw sdhci_pci atkbd libps2 cqhci vivaldi_fmap ccp sdhci i8042 crct10dif_pclmul crc32_pclmul hid_multitouch ghash_clmulni_intel aesni_intel crypto_simd cryptd wdat_wdt mmc_core cec xhci_pci sp5100_tco rng_core xhci_pci_renesas serio 8250_dw i2c_hid_acpi i2c_hid btrfs blake2b_generic libcrc32c crc32c_generic crc32c_intel xor raid6_pq dm_mirror dm_region_hash dm_log dm_mod pkcs8_key_parser crypto_user [ 2.564032] CPU: 6 PID: 325 Comm: systemd-udevd Not tainted 5.18.0-amd-staging-drm-next+ #67 [ 2.564034] Hardware name: Valve Jupiter/Jupiter, BIOS F7A0105 03/21/2022 [ 2.564036] RIP: 0010:drm_mode_object_add+0x72/0x80 [drm] [ 2.564053] Code: f0 89 c3 85 c0 78 07 89 45 00 44 89 65 04 4c 89 ef e8 e2 99 04 f1 31 c0 85 db 0f 4e c3 5b 5d 41 5c 41 5d c3 80 7f 50 00 74 ac <0f> 0b eb a8 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 41 54 4c [ 2.564055] RSP: 0018:ffffb2e880413860 EFLAGS: 00010202 [ 2.564056] RAX: ffffffffc0ba1440 RBX: ffff99508a860010 RCX: 0000000000000001 [ 2.564057] RDX: 00000000b0b0b0b0 RSI: ffff99508c050110 RDI: ffff99508a860010 [ 2.564058] RBP: ffff99508c050110 R08: 0000000000000020 R09: ffff99508c292c20 [ 2.564059] R10: 0000000000000000 R11: ffff99508c0507d8 R12: 00000000b0b0b0b0 [ 2.564060] R13: 0000000000000004 R14: ffffffffc068a4b6 R15: ffffffffc068a47f [ 2.564061] FS: 00007fc69b5f1a40(0000) GS:ffff9953aff80000(0000) knlGS:0000000000000000 [ 2.564063] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 2.564063] CR2: 00007f9506804000 CR3: 0000000107f92000 CR4: 0000000000350ee0 [ 2.564065] Call Trace: [ 2.564068] [ 2.564070] drm_property_create+0xc9/0x170 [drm] [ 2.564088] drm_property_create_enum+0x1f/0x70 [drm] [ 2.564105] drm_connector_set_panel_orientation_with_quirk+0x96/0xc0 [drm] [ 2.564123] get_modes+0x4fb/0x530 [amdgpu] [ 2.564378] drm_helper_probe_single_connector_modes+0x1ad/0x850 [drm_kms_helper] [ 2.564390] drm_client_modeset_probe+0x229/0x1400 [drm] [ 2.564411] ? xas_store+0x52/0x5e0 [ 2.564416] ? kmem_cache_alloc_trace+0x177/0x2c0 [ 2.564420] __drm_fb_helper_initial_config_and_unlock+0x44/0x4e0 [drm_kms_helper] [ 2.564430] drm_fbdev_client_hotplug+0x173/0x210 [drm_kms_helper] [ 2.564438] drm_fbdev_generic_setup+0xa5/0x166 [drm_kms_helper] [ 2.564446] amdgpu_pci_probe+0x35e/0x370 [amdgpu] [ 2.564621] local_pci_probe+0x45/0x80 [ 2.564625] ? pci_match_device+0xd7/0x130 [ 2.564627] pci_device_probe+0xbf/0x220 [ 2.564629] ? sysfs_do_create_link_sd+0x69/0xd0 [ 2.564633] really_probe+0x19c/0x380 [ 2.564637] __driver_probe_device+0xfe/0x180 [ 2.564639] driver_probe_device+0x1e/0x90 [ 2.564641] __driver_attach+0xc0/0x1c0 [ 2.564643] ? __device_attach_driver+0xe0/0xe0 [ 2.564644] ? __device_attach_driver+0xe0/0xe0 [ 2.564646] bus_for_each_dev+0x78/0xc0 [ 2.564648] bus_add_driver+0x149/0x1e0 [ 2.564650] driver_register+0x8f/0xe0 [ 2.564652] ? 0xffffffffc1023000 [ 2.564654] do_one_initcall+0x44/0x200 [ 2.564657] ? 
kmem_cache_alloc_trace+0x177/0x2c0 [ 2.564659] do_init_module+0x4c/0x250 [ 2.564663] __do_sys_init_module+0x12e/0x1b0 [ 2.564666] do_syscall_64+0x3b/0x90 [ 2.564670] entry_SYSCALL_64_after_hwframe+0x44/0xae [ 2.564673] RIP: 0033:0x7fc69bff232e [ 2.564674] Code: 48 8b 0d 45 0b 0c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 49 89 ca b8 af 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 12 0b 0c 00 f7 d8 64 89 01 48 [ 2.564676] RSP: 002b:00007ffe872ba3e8 EFLAGS: 00000246 ORIG_RAX: 00000000000000af [ 2.564677] RAX: ffffffffffffffda RBX: 000055873f797820 RCX: 00007fc69bff232e [ 2.564678] RDX: 000055873f7bf390 RSI: 0000000001155e81 RDI: 00007fc699e4d010 [ 2.564679] RBP: 00007fc699e4d010 R08: 000055873f7bfe20 R09: 0000000001155e90 [ 2.564680] R10: 000000055873f7bf R11: 0000000000000246 R12: 000055873f7bf390 [ 2.564681] R13: 000000000000000d R14: 000055873f7c4cb0 R15: 000055873f797820 [ 2.564683] [ 2.564683] ---[ end trace 0000000000000000 ]--- [ 2.564696] ------------[ cut here ]------------ [ 2.564696] WARNING: CPU: 6 PID: 325 at drivers/gpu/drm/drm_mode_object.c:242 drm_object_attach_property+0x52/0x80 [drm] [ 2.564717] Modules linked in: btusb btrtl btbcm btintel btmtk bluetooth rfkill ecdh_generic ecc usbhid crc16 amdgpu(+) drm_ttm_helper ttm agpgart gpu_sched i2c_algo_bit drm_display_helper drm_kms_helper syscopyarea sysfillrect sysimgblt fb_sys_fops drm serio_raw sdhci_pci atkbd libps2 cqhci vivaldi_fmap ccp sdhci i8042 crct10dif_pclmul crc32_pclmul hid_multitouch ghash_clmulni_intel aesni_intel crypto_simd cryptd wdat_wdt mmc_core cec xhci_pci sp5100_tco rng_core xhci_pci_renesas serio 8250_dw i2c_hid_acpi i2c_hid btrfs blake2b_generic libcrc32c crc32c_generic crc32c_intel xor raid6_pq dm_mirror dm_region_hash dm_log dm_mod pkcs8_key_parser crypto_user [ 2.564738] CPU: 6 PID: 325 Comm: systemd-udevd Tainted: G W 5.18.0-amd-staging-drm-next+ #67 [ 2.564740] Hardware name: Valve Jupiter/Jupiter, BIOS F7A0105 03/21/2022 [ 2.564741] RIP: 0010:drm_object_attach_property+0x52/0x80 [drm] [ 2.564759] Code: 2d 83 f8 18 74 33 48 89 74 c1 08 48 8b 4f 08 48 89 94 c1 c8 00 00 00 48 8b 47 08 83 00 01 c3 4d 85 d2 75 dd 83 7f 58 01 75 d7 <0f> 0b eb d3 41 80 78 50 00 74 cc 0f 0b eb c8 44 89 ce 48 c7 c7 28 [ 2.564760] RSP: 0018:ffffb2e8804138d8 EFLAGS: 00010246 [ 2.564761] RAX: 0000000000000010 RBX: ffff99508c1a2000 RCX: ffff99508c1a2180 [ 2.564762] RDX: 0000000000000003 RSI: ffff99508c050100 RDI: ffff99508c1a2040 [ 2.564763] RBP: 00000000ffffffff R08: ffff99508a860010 R09: 00000000c0c0c0c0 [ 2.564763] R10: 0000000000000000 R11: 0000000000000020 R12: ffff99508a860010 [ 2.564764] R13: ffff995088733008 R14: ffff99508c1a2000 R15: ffffffffc068a47f [ 2.564765] FS: 00007fc69b5f1a40(0000) GS:ffff9953aff80000(0000) knlGS:0000000000000000 [ 2.564766] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 2.564767] CR2: 00007f9506804000 CR3: 0000000107f92000 CR4: 0000000000350ee0 [ 2.564768] Call Trace: [ 2.564769] [ 2.564770] drm_connector_set_panel_orientation_with_quirk+0x4a/0xc0 [drm] [ 2.564789] get_modes+0x4fb/0x530 [amdgpu] [ 2.565024] drm_helper_probe_single_connector_modes+0x1ad/0x850 [drm_kms_helper] [ 2.565036] drm_client_modeset_probe+0x229/0x1400 [drm] [ 2.565056] ? xas_store+0x52/0x5e0 [ 2.565060] ? 
kmem_cache_alloc_trace+0x177/0x2c0 [ 2.565062] __drm_fb_helper_initial_config_and_unlock+0x44/0x4e0 [drm_kms_helper] [ 2.565072] drm_fbdev_client_hotplug+0x173/0x210 [drm_kms_helper] [ 2.565080] drm_fbdev_generic_setup+0xa5/0x166 [drm_kms_helper] [ 2.565088] amdgpu_pci_probe+0x35e/0x370 [amdgpu] [ 2.565261] local_pci_probe+0x45/0x80 [ 2.565263] ? pci_match_device+0xd7/0x130 [ 2.565265] pci_device_probe+0xbf/0x220 [ 2.565267] ? sysfs_do_create_link_sd+0x69/0xd0 [ 2.565268] really_probe+0x19c/0x380 [ 2.565270] __driver_probe_device+0xfe/0x180 [ 2.565272] driver_probe_device+0x1e/0x90 [ 2.565274] __driver_attach+0xc0/0x1c0 [ 2.565276] ? __device_attach_driver+0xe0/0xe0 [ 2.565278] ? __device_attach_driver+0xe0/0xe0 [ 2.565279] bus_for_each_dev+0x78/0xc0 [ 2.565281] bus_add_driver+0x149/0x1e0 [ 2.565283] driver_register+0x8f/0xe0 [ 2.565285] ? 0xffffffffc1023000 [ 2.565286] do_one_initcall+0x44/0x200 [ 2.565288] ? kmem_cache_alloc_trace+0x177/0x2c0 [ 2.565290] do_init_module+0x4c/0x250 [ 2.565291] __do_sys_init_module+0x12e/0x1b0 [ 2.565294] do_syscall_64+0x3b/0x90 [ 2.565296] entry_SYSCALL_64_after_hwframe+0x44/0xae [ 2.565297] RIP: 0033:0x7fc69bff232e [ 2.565298] Code: 48 8b 0d 45 0b 0c 00 f7 d8 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 90 f3 0f 1e fa 49 89 ca b8 af 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 12 0b 0c 00 f7 d8 64 89 01 48 [ 2.565299] RSP: 002b:00007ffe872ba3e8 EFLAGS: 00000246 ORIG_RAX: 00000000000000af [ 2.565301] RAX: ffffffffffffffda RBX: 000055873f797820 RCX: 00007fc69bff232e [ 2.565302] RDX: 000055873f7bf390 RSI: 0000000001155e81 RDI: 00007fc699e4d010 [ 2.565303] RBP: 00007fc699e4d010 R08: 000055873f7bfe20 R09: 0000000001155e90 [ 2.565303] R10: 000000055873f7bf R11: 0000000000000246 R12: 000055873f7bf390 [ 2.565304] R13: 000000000000000d R14: 000055873f7c4cb0 R15: 000055873f797820 [ 2.565306] [ 2.565307] ---[ end trace 0000000000000000 ]--- -- v2: - call amdgpu_dm_connector_get_modes() instead of ddc_get_modes() (Harry) Fixes: d77de7880e0e0 ("amd/display: enable panel orientation quirks") Acked-by: Hans de Goede Signed-off-by: Melissa Wen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8660d93cc405..08906c7778b4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4135,6 +4135,7 @@ static void register_backlight_device(struct amdgpu_display_manager *dm, } } +static void amdgpu_set_panel_orientation(struct drm_connector *connector); /* * In this architecture, the association @@ -4326,6 +4327,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev) adev_to_drm(adev)->vblank_disable_immediate = false; } } + amdgpu_set_panel_orientation(&aconnector->base); } /* Software is initialized. Now we can register interrupt handlers. 
*/ @@ -6684,6 +6686,10 @@ static void amdgpu_set_panel_orientation(struct drm_connector *connector) connector->connector_type != DRM_MODE_CONNECTOR_LVDS) return; + mutex_lock(&connector->dev->mode_config.mutex); + amdgpu_dm_connector_get_modes(connector); + mutex_unlock(&connector->dev->mode_config.mutex); + encoder = amdgpu_dm_connector_to_encoder(connector); if (!encoder) return; @@ -6728,8 +6734,6 @@ static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, * restored here. */ amdgpu_dm_update_freesync_caps(connector, edid); - - amdgpu_set_panel_orientation(connector); } else { amdgpu_dm_connector->num_modes = 0; } -- cgit v1.2.3-59-g8ed1b From a6250bdb6c4677ee77d699b338e077b900f94c0c Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 9 Aug 2022 11:44:05 -0400 Subject: drm/amdgpu: Only disable prefer_shadow on hawaii We changed it for all asics due to a hibernation regression on hawaii, but the workaround breaks suspend on a polaris12. Just disable it for hawaii. Link: https://bugzilla.kernel.org/show_bug.cgi?id=216119 Fixes: 3a4b1cc28fbd ("drm/amdgpu/display: disable prefer_shadow for generic fb helpers") Reviewed-and-tested-by: Mario Limonciello Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 3 +-- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 3 +-- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 3 +-- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 3 +-- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 7 +++++-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++++-- 6 files changed, 14 insertions(+), 12 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 108e8e8a1a36..576849e95296 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -496,8 +496,7 @@ static int amdgpu_vkms_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = YRES_MAX; adev_to_drm(adev)->mode_config.preferred_depth = 24; - /* disable prefer shadow for now due to hibernation issues */ - adev_to_drm(adev)->mode_config.prefer_shadow = 0; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index 9c964cd3b5d4..288fce7dc0ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2796,8 +2796,7 @@ static int dce_v10_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - /* disable prefer shadow for now due to hibernation issues */ - adev_to_drm(adev)->mode_config.prefer_shadow = 0; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index e0ad9f27dc3f..cbe5250b31cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2914,8 +2914,7 @@ static int dce_v11_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - /* disable prefer shadow for now due to hibernation issues */ - adev_to_drm(adev)->mode_config.prefer_shadow = 0; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; 
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index 77f5e998a120..b1c44fab074f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2673,8 +2673,7 @@ static int dce_v6_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_width = 16384; adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - /* disable prefer shadow for now due to hibernation issues */ - adev_to_drm(adev)->mode_config.prefer_shadow = 0; + adev_to_drm(adev)->mode_config.prefer_shadow = 1; adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 802e5c753271..a22b45c92792 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2693,8 +2693,11 @@ static int dce_v8_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - /* disable prefer shadow for now due to hibernation issues */ - adev_to_drm(adev)->mode_config.prefer_shadow = 0; + if (adev->asic_type == CHIP_HAWAII) + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; + else + adev_to_drm(adev)->mode_config.prefer_shadow = 1; adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 08906c7778b4..5140d9c2bf3b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3825,8 +3825,11 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; - /* disable prefer shadow for now due to hibernation issues */ - adev_to_drm(adev)->mode_config.prefer_shadow = 0; + if (adev->asic_type == CHIP_HAWAII) + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; + else + adev_to_drm(adev)->mode_config.prefer_shadow = 1; /* indicates support for immediate flip */ adev_to_drm(adev)->mode_config.async_page_flip = true; -- cgit v1.2.3-59-g8ed1b From a61bb3422e8d6ec002dbe288356470540eb5662c Mon Sep 17 00:00:00 2001 From: Jouni Högander Date: Tue, 19 Jul 2022 12:56:59 +0300 Subject: drm/amdgpu_dm: Rely on split out luminance calculation function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Luminance range calculation was split out into drm_edid.c and is now part of edid parsing. Rely on values calculated during edid parsing and use these for caps->aux_max_input_signal and caps->aux_min_input_signal. 
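For context, the driver no longer recomputes the CTA-861-G luminance values itself; it simply reads the range that EDID parsing stored in the connector's display_info. A minimal sketch of that consumer pattern follows (the struct and field names are the ones used in the amdgpu_dm hunk below; the wrapper function and the assumption that the struct is declared next to drm_display_info in drm_connector.h are illustrative, not part of the patch):

    #include <linux/types.h>
    #include <drm/drm_connector.h>

    /* Sketch only: pull the luminance range that EDID parsing filled in.
     * The real driver code is the update_connector_ext_caps() hunk below. */
    static void example_backlight_caps_from_edid(struct drm_connector *connector,
                                                 u32 *aux_min, u32 *aux_max)
    {
            /* Zero when the display does not report a luminance range. */
            const struct drm_luminance_range_info *lr =
                    &connector->display_info.luminance_range;

            *aux_min = lr->min_luminance;
            *aux_max = lr->max_luminance;
    }
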
v2: Use values calculated during edid parsing Cc: Roman Li Cc: Rodrigo Siqueira Cc: Harry Wentland Cc: Lyude Paul Cc: Mika Kahola Cc: Jani Nikula Cc: Manasi Navare Signed-off-by: Jouni Högander Acked-by: Alex Deucher Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220719095700.14923-3-jouni.hogander@intel.com --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 35 +++-------------------- 1 file changed, 4 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8a3e25d35099..85fdd6baf803 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2813,15 +2813,12 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { - u32 max_avg, min_cll, max, min, q, r; struct amdgpu_dm_backlight_caps *caps; struct amdgpu_display_manager *dm; struct drm_connector *conn_base; struct amdgpu_device *adev; struct dc_link *link = NULL; - static const u8 pre_computed_values[] = { - 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69, - 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98}; + struct drm_luminance_range_info *luminance_range; int i; if (!aconnector || !aconnector->dc_link) @@ -2843,8 +2840,6 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps = &dm->backlight_caps[i]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; - max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall; - min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; if (caps->ext_caps->bits.oled == 1 /*|| caps->ext_caps->bits.sdr_aux_backlight_control == 1 || @@ -2856,31 +2851,9 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) else if (amdgpu_backlight == 1) caps->aux_support = true; - /* From the specification (CTA-861-G), for calculating the maximum - * luminance we need to use: - * Luminance = 50*2**(CV/32) - * Where CV is a one-byte value. - * For calculating this expression we may need float point precision; - * to avoid this complexity level, we take advantage that CV is divided - * by a constant. From the Euclids division algorithm, we know that CV - * can be written as: CV = 32*q + r. Next, we replace CV in the - * Luminance expression and get 50*(2**q)*(2**(r/32)), hence we just - * need to pre-compute the value of r/32. For pre-computing the values - * We just used the following Ruby line: - * (0...32).each {|cv| puts (50*2**(cv/32.0)).round} - * The results of the above expressions can be verified at - * pre_computed_values. 
- */ - q = max_avg >> 5; - r = max_avg % 32; - max = (1 << q) * pre_computed_values[r]; - - // min luminance: maxLum * (CV/255)^2 / 100 - q = DIV_ROUND_CLOSEST(min_cll, 255); - min = max * DIV_ROUND_CLOSEST((q * q), 100); - - caps->aux_max_input_signal = max; - caps->aux_min_input_signal = min; + luminance_range = &conn_base->display_info.luminance_range; + caps->aux_min_input_signal = luminance_range->min_luminance; + caps->aux_max_input_signal = luminance_range->max_luminance; } void amdgpu_dm_update_connector_after_detect( -- cgit v1.2.3-59-g8ed1b From c620e79bb695b866b2cefa0135f7eddd0d5bc9d7 Mon Sep 17 00:00:00 2001 From: Rodrigo Siqueira Date: Mon, 21 Feb 2022 15:22:50 -0500 Subject: drm/amd/display: Add some extra kernel doc to amdgpu_dm Reviewed-by: Harry Wentland Acked-by: Tom Chung Signed-off-by: Rodrigo Siqueira Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 16 ++++++-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 46 ++++++++++++++++++++--- 2 files changed, 54 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 5140d9c2bf3b..2412dc601ce2 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9321,6 +9321,7 @@ static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm /** * amdgpu_dm_atomic_check() - Atomic check implementation for AMDgpu DM. + * * @dev: The DRM device * @state: The atomic state to commit * @@ -9935,8 +9936,18 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, return valid_vsdb_found ? i : -ENODEV; } +/** + * amdgpu_dm_update_freesync_caps - Update Freesync capabilities + * + * @aconnector: Connector to query. + * + * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep + * track of some of the display information in the internal data struct used by + * amdgpu_dm. This function checks which type of connector we need to set the + * FreeSync parameters. + */ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, - struct edid *edid) + struct edid *edid) { int i = 0; struct detailed_timing *timing; @@ -9949,8 +9960,8 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); - bool freesync_capable = false; struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; + bool freesync_capable = false; if (!connector->state) { DRM_ERROR("%s - Connector has no state", __func__); @@ -9979,7 +9990,6 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, if (!adev->dm.freesync_module) goto update; - if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || sink->sink_signal == SIGNAL_TYPE_EDP) { bool edid_check_required = false; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index 90b306a1dd68..b44faaad9b0b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -598,6 +598,10 @@ struct amdgpu_dm_connector { * The 'current' sink is in dc_link->sink. */ struct dc_sink *dc_sink; struct dc_link *dc_link; + + /** + * @dc_em_sink: Reference to the emulated (virtual) sink. 
+ */ struct dc_sink *dc_em_sink; /* DM only */ @@ -610,7 +614,16 @@ struct amdgpu_dm_connector { struct amdgpu_i2c_adapter *i2c; /* Monitor range limits */ - int min_vfreq ; + /** + * @min_vfreq: Minimal frequency supported by the display in Hz. This + * value is set to zero when there is no FreeSync support. + */ + int min_vfreq; + + /** + * @max_vfreq: Maximum frequency supported by the display in Hz. This + * value is set to zero when there is no FreeSync support. + */ int max_vfreq ; int pixel_clock_mhz; @@ -705,11 +718,34 @@ struct dm_connector_state { uint64_t pbn; }; +/** + * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info + * + * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this + * struct is useful to keep track of the display-specific information about + * FreeSync. + */ struct amdgpu_hdmi_vsdb_info { - unsigned int amd_vsdb_version; /* VSDB version, should be used to determine which VSIF to send */ - bool freesync_supported; /* FreeSync Supported */ - unsigned int min_refresh_rate_hz; /* FreeSync Minimum Refresh Rate in Hz */ - unsigned int max_refresh_rate_hz; /* FreeSync Maximum Refresh Rate in Hz */ + /** + * @amd_vsdb_version: Vendor Specific Data Block Version, should be + * used to determine which Vendor Specific InfoFrame (VSIF) to send. + */ + unsigned int amd_vsdb_version; + + /** + * @freesync_supported: FreeSync Supported. + */ + bool freesync_supported; + + /** + * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz. + */ + unsigned int min_refresh_rate_hz; + + /** + * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz + */ + unsigned int max_refresh_rate_hz; }; -- cgit v1.2.3-59-g8ed1b From df78f7f660cdd5974b68649a95dbb34da4d4dfa7 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 17 Aug 2022 15:38:33 -0400 Subject: drm/display/dp_mst: Call them time slots, not VCPI slots MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit VCPI is only sort of the correct term here, originally the majority of this code simply referred to timeslots vaguely as "slots" - and since I started working on it and adding atomic functionality, the name "VCPI slots" has been used to represent time slots. Now that we actually have consistent access to the DisplayPort spec thanks to VESA, I now know this isn't actually the proper term - as the specification refers to these as time slots. Since we're trying to make this code as easy to figure out as possible, let's take this opportunity to correct this nomenclature and call them by their proper name - timeslots. Likewise, we rename various functions appropriately, along with replacing references in the kernel documentation and various debugging messages. It's important to note that this patch series leaves the legacy MST code untouched for the most part, which is fine since we'll be removing it soon anyhow. There should be no functional changes in this series. 
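Since the rename is otherwise mechanical, the driver-facing pattern after this patch boils down to two calls. The sketch below only illustrates the renamed entry points; the signatures match the hunks that follow, while the surrounding functions and variable names are placeholders:

    #include <drm/display/drm_dp_mst_helper.h>

    /* Encoder ->atomic_check(): (re)allocate time slots for the new state.
     * Replaces drm_dp_atomic_find_vcpi_slots(). */
    static int example_reserve_time_slots(struct drm_atomic_state *state,
                                          struct drm_dp_mst_topology_mgr *mgr,
                                          struct drm_dp_mst_port *port, int pbn)
    {
            /* A pbn_div of 0 lets the helper fall back to the manager's
             * default divider, as in the nouveau call site below. */
            int slots = drm_dp_atomic_find_time_slots(state, mgr, port, pbn, 0);

            return slots < 0 ? slots : 0;
    }

    /* Connector ->atomic_check(): release the allocation when the CRTC is
     * removed. Replaces drm_dp_atomic_release_vcpi_slots(). */
    static int example_release_time_slots(struct drm_atomic_state *state,
                                          struct drm_dp_mst_topology_mgr *mgr,
                                          struct drm_dp_mst_port *port)
    {
            return drm_dp_atomic_release_time_slots(state, mgr, port);
    }
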
v2: * Add note that Wayne Lin from AMD suggested regarding slots being between the source DP Tx and the immediate downstream DP Rx Signed-off-by: Lyude Paul Cc: Wayne Lin Cc: Ville Syrjälä Cc: Fangzhi Zuo Cc: Jani Nikula Cc: Imre Deak Cc: Daniel Vetter Cc: Sean Paul Acked-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220817193847.557945-5-lyude@redhat.com --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 28 +++--- drivers/gpu/drm/display/drm_dp_mst_topology.c | 106 ++++++++++----------- drivers/gpu/drm/i915/display/intel_dp_mst.c | 5 +- drivers/gpu/drm/nouveau/dispnv50/disp.c | 4 +- include/drm/display/drm_dp_mst_helper.h | 12 ++- 6 files changed, 81 insertions(+), 76 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 85fdd6baf803..cd30b02af8ee 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6401,7 +6401,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, clock = adjusted_mode->clock; dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); } - dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_vcpi_slots(state, + dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, dm_new_connector_state->pbn, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 2e74ccf7df5b..655d63b20b33 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -472,7 +472,7 @@ static int dm_dp_mst_atomic_check(struct drm_connector *connector, return 0; } - return drm_dp_atomic_release_vcpi_slots(state, + return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port); } @@ -785,7 +785,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, if (initial_slack[next_index] > fair_pbn_alloc) { vars[next_index].pbn += fair_pbn_alloc; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn, @@ -795,7 +795,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); } else { vars[next_index].pbn -= fair_pbn_alloc; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn, @@ -804,7 +804,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, } } else { vars[next_index].pbn += initial_slack[next_index]; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn, @@ -814,7 +814,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; } else { vars[next_index].pbn -= initial_slack[next_index]; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn, @@ -872,7 +872,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state, break; vars[next_index].pbn = 
kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps); - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn, @@ -884,7 +884,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state, vars[next_index].bpp_x16 = 0; } else { vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps); - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, vars[next_index].pbn, @@ -971,11 +971,11 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; - if (drm_dp_atomic_find_vcpi_slots(state, - params[i].port->mgr, - params[i].port, - vars[i + k].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) + if (drm_dp_atomic_find_time_slots(state, + params[i].port->mgr, + params[i].port, + vars[i + k].pbn, + dm_mst_get_pbn_divider(dc_link)) < 0) return false; } if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { @@ -989,7 +989,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, vars[i + k].pbn, @@ -999,7 +999,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; - if (drm_dp_atomic_find_vcpi_slots(state, + if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, vars[i + k].pbn, diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index f448e3e5ec6e..fad80ab2b9db 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -4293,11 +4293,11 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ EXPORT_SYMBOL(drm_dp_mst_get_edid); /** - * drm_dp_find_vcpi_slots() - Find VCPI slots for this PBN value + * drm_dp_find_vcpi_slots() - Find time slots for this PBN value * @mgr: manager to use * @pbn: payload bandwidth to convert into slots. * - * Calculate the number of VCPI slots that will be required for the given PBN + * Calculate the number of time slots that will be required for the given PBN * value. This function is deprecated, and should not be used in atomic * drivers. * @@ -4334,17 +4334,17 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, } /** - * drm_dp_atomic_find_vcpi_slots() - Find and add VCPI slots to the state + * drm_dp_atomic_find_time_slots() - Find and add time slots to the state * @state: global atomic state * @mgr: MST topology manager for the port - * @port: port to find vcpi slots for + * @port: port to find time slots for * @pbn: bandwidth required for the mode in PBN * @pbn_div: divider for DSC mode that takes FEC into account * - * Allocates VCPI slots to @port, replacing any previous VCPI allocations it + * Allocates time slots to @port, replacing any previous timeslot allocations it * may have had. 
Any atomic drivers which support MST must call this function * in their &drm_encoder_helper_funcs.atomic_check() callback to change the - * current VCPI allocation for the new state, but only when + * current timeslot allocation for the new state, but only when * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set * to ensure compatibility with userspace applications that still use the * legacy modesetting UAPI. @@ -4354,17 +4354,17 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, * * Additionally, it is OK to call this function multiple times on the same * @port as needed. It is not OK however, to call this function and - * drm_dp_atomic_release_vcpi_slots() in the same atomic check phase. + * drm_dp_atomic_release_time_slots() in the same atomic check phase. * * See also: - * drm_dp_atomic_release_vcpi_slots() + * drm_dp_atomic_release_time_slots() * drm_dp_mst_atomic_check() * * Returns: * Total slots in the atomic state assigned for this port, or a negative error * code if the port no longer exists */ -int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, +int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int pbn_div) @@ -4381,17 +4381,17 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, list_for_each_entry(pos, &topology_state->payloads, next) { if (pos->port == port) { payload = pos; - prev_slots = payload->vcpi; + prev_slots = payload->time_slots; prev_bw = payload->pbn; /* * This should never happen, unless the driver tries - * releasing and allocating the same VCPI allocation, + * releasing and allocating the same timeslot allocation, * which is an error */ if (WARN_ON(!prev_slots)) { drm_err(mgr->dev, - "cannot allocate and release VCPI on [MST PORT:%p] in the same state\n", + "cannot allocate and release time slots on [MST PORT:%p] in the same state\n", port); return -EINVAL; } @@ -4409,7 +4409,7 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, req_slots = DIV_ROUND_UP(pbn, pbn_div); - drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n", + drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n", port->connector->base.id, port->connector->name, port, prev_slots, req_slots); drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n", @@ -4426,20 +4426,20 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, payload->port = port; list_add(&payload->next, &topology_state->payloads); } - payload->vcpi = req_slots; + payload->time_slots = req_slots; payload->pbn = pbn; return req_slots; } -EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); +EXPORT_SYMBOL(drm_dp_atomic_find_time_slots); /** - * drm_dp_atomic_release_vcpi_slots() - Release allocated vcpi slots + * drm_dp_atomic_release_time_slots() - Release allocated time slots * @state: global atomic state * @mgr: MST topology manager for the port - * @port: The port to release the VCPI slots from + * @port: The port to release the time slots from * - * Releases any VCPI slots that have been allocated to a port in the atomic + * Releases any time slots that have been allocated to a port in the atomic * state. Any atomic drivers which support MST must call this function in * their &drm_connector_helper_funcs.atomic_check() callback when the * connector will no longer have VCPI allocated (e.g. 
because its CRTC was @@ -4448,18 +4448,18 @@ EXPORT_SYMBOL(drm_dp_atomic_find_vcpi_slots); * It is OK to call this even if @port has been removed from the system. * Additionally, it is OK to call this function multiple times on the same * @port as needed. It is not OK however, to call this function and - * drm_dp_atomic_find_vcpi_slots() on the same @port in a single atomic check + * drm_dp_atomic_find_time_slots() on the same @port in a single atomic check * phase. * * See also: - * drm_dp_atomic_find_vcpi_slots() + * drm_dp_atomic_find_time_slots() * drm_dp_mst_atomic_check() * * Returns: * 0 if all slots for this port were added back to * &drm_dp_mst_topology_state.avail_slots or negative error code */ -int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, +int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) { @@ -4483,16 +4483,16 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, return -EINVAL; } - drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi); - if (pos->vcpi) { + drm_dbg_atomic(mgr->dev, "[MST PORT:%p] TU %d -> 0\n", port, pos->time_slots); + if (pos->time_slots) { drm_dp_mst_put_port_malloc(port); - pos->vcpi = 0; + pos->time_slots = 0; pos->pbn = 0; } return 0; } -EXPORT_SYMBOL(drm_dp_atomic_release_vcpi_slots); +EXPORT_SYMBOL(drm_dp_atomic_release_time_slots); /** * drm_dp_mst_update_slots() - updates the slot info depending on the DP ecoding format @@ -4546,7 +4546,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); if (ret) { - drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d ret=%d\n", + drm_dbg_kms(mgr->dev, "failed to init time slots=%d ret=%d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), ret); drm_dp_mst_topology_put_port(port); goto out; @@ -5071,8 +5071,8 @@ drm_dp_mst_duplicate_state(struct drm_private_obj *obj) INIT_LIST_HEAD(&state->payloads); list_for_each_entry(pos, &old_state->payloads, next) { - /* Prune leftover freed VCPI allocations */ - if (!pos->vcpi) + /* Prune leftover freed timeslot allocations */ + if (!pos->time_slots) continue; payload = kmemdup(pos, sizeof(*payload), GFP_KERNEL); @@ -5104,7 +5104,7 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) { /* We only keep references to ports with non-zero VCPIs */ - if (pos->vcpi) + if (pos->time_slots) drm_dp_mst_put_port_malloc(pos->port); kfree(pos); } @@ -5230,28 +5230,28 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port, } static inline int -drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_topology_state *mst_state) +drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state) { struct drm_dp_mst_atomic_payload *payload; int avail_slots = mst_state->total_avail_slots, payload_count = 0; list_for_each_entry(payload, &mst_state->payloads, next) { /* Releasing payloads is always OK-even if the port is gone */ - if (!payload->vcpi) { - drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n", + if (!payload->time_slots) { + drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all time slots\n", payload->port); continue; } - drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n", - payload->port, payload->vcpi); + drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d 
time slots\n", + payload->port, payload->time_slots); - avail_slots -= payload->vcpi; + avail_slots -= payload->time_slots; if (avail_slots < 0) { drm_dbg_atomic(mgr->dev, - "[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n", - payload->port, mst_state, avail_slots + payload->vcpi); + "[MST PORT:%p] not enough time slots in mst state %p (avail=%d)\n", + payload->port, mst_state, avail_slots + payload->time_slots); return -ENOSPC; } @@ -5262,7 +5262,7 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr, return -EINVAL; } } - drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n", + drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU avail=%d used=%d\n", mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots); return 0; @@ -5351,7 +5351,7 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_atomic_payload *pos; bool found = false; - int vcpi = 0; + int time_slots = 0; mst_state = drm_atomic_get_mst_topology_state(state, port->mgr); @@ -5367,30 +5367,30 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, if (!found) { drm_dbg_atomic(state->dev, - "[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n", + "[MST PORT:%p] Couldn't find payload in mst state %p\n", port, mst_state); return -EINVAL; } if (pos->dsc_enabled == enable) { drm_dbg_atomic(state->dev, - "[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n", - port, enable, pos->vcpi); - vcpi = pos->vcpi; + "[MST PORT:%p] DSC flag is already set to %d, returning %d time slots\n", + port, enable, pos->time_slots); + time_slots = pos->time_slots; } if (enable) { - vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div); + time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn, pbn_div); drm_dbg_atomic(state->dev, - "[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n", - port, vcpi); - if (vcpi < 0) + "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n", + port, time_slots); + if (time_slots < 0) return -EINVAL; } pos->dsc_enabled = enable; - return vcpi; + return time_slots; } EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); /** @@ -5400,15 +5400,15 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); * * Checks the given topology state for an atomic update to ensure that it's * valid. This includes checking whether there's enough bandwidth to support - * the new VCPI allocations in the atomic update. + * the new timeslot allocations in the atomic update. * * Any atomic drivers supporting DP MST must make sure to call this after * checking the rest of their state in their * &drm_mode_config_funcs.atomic_check() callback. 
* * See also: - * drm_dp_atomic_find_vcpi_slots() - * drm_dp_atomic_release_vcpi_slots() + * drm_dp_atomic_find_time_slots() + * drm_dp_atomic_release_time_slots() * * Returns: * @@ -5424,7 +5424,7 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state) if (!mgr->mst_state) continue; - ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state); + ret = drm_dp_mst_atomic_check_payload_alloc_limits(mgr, mst_state); if (ret) break; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 14d2a64193b2..1cebbc51d8fa 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -70,7 +70,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, crtc_state->pipe_bpp, false); - slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, + slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr, connector->port, crtc_state->pbn, drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr, @@ -344,8 +344,7 @@ intel_dp_mst_atomic_check(struct drm_connector *connector, } mgr = &enc_to_mst(to_intel_encoder(old_conn_state->best_encoder))->primary->dp.mst_mgr; - ret = drm_dp_atomic_release_vcpi_slots(&state->base, mgr, - intel_connector->port); + ret = drm_dp_atomic_release_time_slots(&state->base, mgr, intel_connector->port); return ret; } diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index c8fedd7d227f..1d54f2cd38b2 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1065,7 +1065,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, false); } - slots = drm_dp_atomic_find_vcpi_slots(state, &mstm->mgr, mstc->port, + slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn, 0); if (slots < 0) return slots; @@ -1277,7 +1277,7 @@ nv50_mstc_atomic_check(struct drm_connector *connector, return 0; } - return drm_dp_atomic_release_vcpi_slots(state, mgr, mstc->port); + return drm_dp_atomic_release_time_slots(state, mgr, mstc->port); } static int diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 5671173f9f37..9cdd2def56a1 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -544,7 +544,13 @@ struct drm_dp_payload { struct drm_dp_mst_atomic_payload { struct drm_dp_mst_port *port; - int vcpi; + + /** + * @time_slots: + * The number of timeslots allocated to this payload from the source DP Tx to + * the immediate downstream DP Rx + */ + int time_slots; int pbn; bool dsc_enabled; struct list_head next; @@ -846,7 +852,7 @@ void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); int __must_check -drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state, +drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int pbn_div); @@ -858,7 +864,7 @@ int __must_check drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); int __must_check -drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state, +drm_dp_atomic_release_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); int drm_dp_send_power_updown_phy(struct 
drm_dp_mst_topology_mgr *mgr, -- cgit v1.2.3-59-g8ed1b From a5c2c0d164e96d24f73faffcd3b7bbb607e701a9 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 17 Aug 2022 15:38:37 -0400 Subject: drm/display/dp_mst: Add nonblocking helpers for DP MST MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As Daniel Vetter pointed out, if we only use the atomic modesetting locks with MST it's technically possible for a driver with non-blocking modesets to race when it comes to MST displays - as we make the mistake of not doing our own CRTC commit tracking in the topology_state object. This could potentially cause problems if something like this happens: * User starts non-blocking commit to disable CRTC-1 on MST topology 1 * User starts non-blocking commit to enable CRTC-2 on MST topology 1 There's no guarantee here that the commit for disabling CRTC-2 will only occur after CRTC-1 has finished, since neither commit shares a CRTC - only the private modesetting object for MST. Keep in mind this likely isn't a problem for blocking modesets, only non-blocking. So, begin fixing this by keeping track of which CRTCs on a topology have changed by keeping track of which CRTCs we release or allocate timeslots on. As well, add some helpers for: * Setting up the drm_crtc_commit structs in the ->commit_setup hook * Waiting for any CRTC dependencies from the previous topology state v2: * Use drm_dp_mst_atomic_setup_commit() directly - Jani Signed-off-by: Lyude Paul Cc: Wayne Lin Cc: Ville Syrjälä Cc: Fangzhi Zuo Cc: Jani Nikula Cc: Imre Deak Cc: Daniel Vetter Cc: Sean Paul Acked-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220817193847.557945-9-lyude@redhat.com --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 5 +- drivers/gpu/drm/display/drm_dp_mst_topology.c | 93 +++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_display.c | 6 ++ drivers/gpu/drm/nouveau/dispnv50/disp.c | 7 ++ include/drm/display/drm_dp_mst_helper.h | 15 ++++ 5 files changed, 124 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index cd30b02af8ee..c97a4d759b94 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2808,7 +2808,8 @@ static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { }; static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { - .atomic_commit_tail = amdgpu_dm_atomic_commit_tail + .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, + .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, }; static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) @@ -7959,6 +7960,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) DRM_ERROR("Waiting for fences timed out!"); drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_dp_mst_atomic_wait_for_dependencies(state); dm_state = dm_atomic_get_new_state(state); if (dm_state && dm_state->context) { @@ -8357,7 +8359,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dc_release_state(dc_state_temp); } - static int dm_force_atomic_commit(struct drm_connector *connector) { int ret = 0; diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 1c054a5e2e77..d701e5b819b8 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c 
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -4384,12 +4384,16 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, { struct drm_dp_mst_topology_state *topology_state; struct drm_dp_mst_atomic_payload *payload = NULL; + struct drm_connector_state *conn_state; int prev_slots = 0, prev_bw = 0, req_slots; topology_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(topology_state)) return PTR_ERR(topology_state); + conn_state = drm_atomic_get_new_connector_state(state, port->connector); + topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc); + /* Find the current allocation for this port, if any */ payload = drm_atomic_get_mst_payload_state(topology_state, port); if (payload) { @@ -4469,11 +4473,15 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state, { struct drm_dp_mst_topology_state *topology_state; struct drm_dp_mst_atomic_payload *payload; + struct drm_connector_state *conn_state; topology_state = drm_atomic_get_mst_topology_state(state, mgr); if (IS_ERR(topology_state)) return PTR_ERR(topology_state); + conn_state = drm_atomic_get_old_connector_state(state, port->connector); + topology_state->pending_crtc_mask |= drm_crtc_mask(conn_state->crtc); + payload = drm_atomic_get_mst_payload_state(topology_state, port); if (WARN_ON(!payload)) { drm_err(mgr->dev, "No payload for [MST PORT:%p] found in mst state %p\n", @@ -4492,6 +4500,83 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state, } EXPORT_SYMBOL(drm_dp_atomic_release_time_slots); +/** + * drm_dp_mst_atomic_setup_commit() - setup_commit hook for MST helpers + * @state: global atomic state + * + * This function saves all of the &drm_crtc_commit structs in an atomic state that touch any CRTCs + * currently assigned to an MST topology. Drivers must call this hook from their + * &drm_mode_config_helper_funcs.atomic_commit_setup hook. + * + * Returns: + * 0 if all CRTC commits were retrieved successfully, negative error code otherwise + */ +int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state) +{ + struct drm_dp_mst_topology_mgr *mgr; + struct drm_dp_mst_topology_state *mst_state; + struct drm_crtc *crtc; + struct drm_crtc_state *crtc_state; + int i, j, commit_idx, num_commit_deps; + + for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { + if (!mst_state->pending_crtc_mask) + continue; + + num_commit_deps = hweight32(mst_state->pending_crtc_mask); + mst_state->commit_deps = kmalloc_array(num_commit_deps, + sizeof(*mst_state->commit_deps), GFP_KERNEL); + if (!mst_state->commit_deps) + return -ENOMEM; + mst_state->num_commit_deps = num_commit_deps; + + commit_idx = 0; + for_each_new_crtc_in_state(state, crtc, crtc_state, j) { + if (mst_state->pending_crtc_mask & drm_crtc_mask(crtc)) { + mst_state->commit_deps[commit_idx++] = + drm_crtc_commit_get(crtc_state->commit); + } + } + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit); + +/** + * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies + * @state: global atomic state + * + * Goes through any MST topologies in this atomic state, and waits for any pending commits which + * touched CRTCs that were/are on an MST topology to be programmed to hardware and flipped to before + * returning. 
This is to prevent multiple non-blocking commits affecting an MST topology from racing + * with eachother by forcing them to be executed sequentially in situations where the only resources + * the modeset objects in these commits share are an MST topology. + * + * This function also prepares the new MST state for commit by performing some state preparation + * which can't be done until this point, such as reading back the final VC start slots (which are + * determined at commit-time) from the previous state. + * + * All MST drivers must call this function after calling drm_atomic_helper_wait_for_dependencies(), + * or whatever their equivalent of that is. + */ +void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state) +{ + struct drm_dp_mst_topology_state *old_mst_state; + struct drm_dp_mst_topology_mgr *mgr; + int i, j, ret; + + for_each_old_mst_mgr_in_state(state, mgr, old_mst_state, i) { + for (j = 0; j < old_mst_state->num_commit_deps; j++) { + ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]); + if (ret < 0) + drm_err(state->dev, "Failed to wait for %s: %d\n", + old_mst_state->commit_deps[j]->crtc->name, ret); + } + } +} +EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies); + /** * drm_dp_mst_update_slots() - updates the slot info depending on the DP ecoding format * @mst_state: mst_state to update @@ -5067,6 +5152,9 @@ drm_dp_mst_duplicate_state(struct drm_private_obj *obj) __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base); INIT_LIST_HEAD(&state->payloads); + state->commit_deps = NULL; + state->num_commit_deps = 0; + state->pending_crtc_mask = 0; list_for_each_entry(pos, &old_state->payloads, next) { /* Prune leftover freed timeslot allocations */ @@ -5099,6 +5187,7 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, struct drm_dp_mst_topology_state *mst_state = to_dp_mst_topology_state(state); struct drm_dp_mst_atomic_payload *pos, *tmp; + int i; list_for_each_entry_safe(pos, tmp, &mst_state->payloads, next) { /* We only keep references to ports with non-zero VCPIs */ @@ -5107,6 +5196,10 @@ static void drm_dp_mst_destroy_state(struct drm_private_obj *obj, kfree(pos); } + for (i = 0; i < mst_state->num_commit_deps; i++) + drm_crtc_commit_put(mst_state->commit_deps[i]); + + kfree(mst_state->commit_deps); kfree(mst_state); } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index bf170bd83ef7..2a45a25e42fb 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7532,6 +7532,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state) intel_atomic_commit_fence_wait(state); drm_atomic_helper_wait_for_dependencies(&state->base); + drm_dp_mst_atomic_wait_for_dependencies(&state->base); if (state->modeset) wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET); @@ -8600,6 +8601,10 @@ out: return ret; } +static const struct drm_mode_config_helper_funcs intel_mode_config_funcs = { + .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, +}; + static void intel_mode_config_init(struct drm_i915_private *i915) { struct drm_mode_config *mode_config = &i915->drm.mode_config; @@ -8614,6 +8619,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915) mode_config->prefer_shadow = 1; mode_config->funcs = &intel_mode_funcs; + mode_config->helper_private = &intel_mode_config_funcs; mode_config->async_page_flip = HAS_ASYNC_FLIPS(i915); diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c 
b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 1d54f2cd38b2..c55af5d78ea2 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2136,6 +2136,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state) nv50_crc_atomic_stop_reporting(state); drm_atomic_helper_wait_for_fences(dev, state, false); drm_atomic_helper_wait_for_dependencies(state); + drm_dp_mst_atomic_wait_for_dependencies(state); drm_atomic_helper_update_legacy_modeset_state(dev, state); drm_atomic_helper_calc_timestamping_constants(state); @@ -2616,6 +2617,11 @@ nv50_disp_func = { .atomic_state_free = nv50_disp_atomic_state_free, }; +static const struct drm_mode_config_helper_funcs +nv50_disp_helper_func = { + .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, +}; + /****************************************************************************** * Init *****************************************************************************/ @@ -2699,6 +2705,7 @@ nv50_display_create(struct drm_device *dev) nouveau_display(dev)->fini = nv50_display_fini; disp->disp = &nouveau_display(dev)->disp; dev->mode_config.funcs = &nv50_disp_func; + dev->mode_config.helper_private = &nv50_disp_helper_func; dev->mode_config.quirk_addfb_prefer_xbgr_30bpp = true; dev->mode_config.normalize_zpos = true; diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 3b155ad3eee4..0ef7d0e6cf0c 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -581,6 +581,19 @@ struct drm_dp_mst_topology_state { /** @mgr: The topology manager */ struct drm_dp_mst_topology_mgr *mgr; + /** + * @pending_crtc_mask: A bitmask of all CRTCs this topology state touches, drivers may + * modify this to add additional dependencies if needed. + */ + u32 pending_crtc_mask; + /** + * @commit_deps: A list of all CRTC commits affecting this topology, this field isn't + * populated until drm_dp_mst_atomic_wait_for_dependencies() is called. 
+ */ + struct drm_crtc_commit **commit_deps; + /** @num_commit_deps: The number of CRTC commits in @commit_deps */ + size_t num_commit_deps; + /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */ u8 total_avail_slots; /** @start_slot: The first usable time slot in this topology (1 or 0) */ @@ -890,6 +903,8 @@ int __must_check drm_dp_atomic_release_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); +void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state); +int __must_check drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state); int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, bool power_up); int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, -- cgit v1.2.3-59-g8ed1b From a76eb4297f90301fa9e4c888fb06749ef1be1c86 Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 17 Aug 2022 15:38:42 -0400 Subject: drm/display/dp_mst: Add helpers for serializing SST <-> MST transitions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's another kind of situation where we could potentially race with nonblocking modesets and MST, especially if we were to only use the locking provided by atomic modesetting: * Display 1 begins as enabled on DP-1 in SST mode * Display 1 switches to MST mode, exposes one sink in MST mode * Userspace does non-blocking modeset to disable the SST display * Userspace does non-blocking modeset to enable the MST display with a different CRTC, but the SST display hasn't been fully taken down yet * Execution order between the last two commits isn't guaranteed since they share no drm resources We can fix this however, by ensuring that we always pull in the atomic topology state whenever a connector capable of driving an MST display performs its atomic check - and then tracking CRTC commits happening on the SST connector in the MST topology state. So, let's add some simple helpers for doing that and hook them up in various drivers. 
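For illustration only (not part of the patch itself): a minimal sketch of the driver-side hookup this change expects, modeled on the nouveau and i915 hunks below. The foo_* names and the mst_capable flag are placeholders for however a driver already tracks MST support on a physical DP connector.

  static int foo_dp_connector_atomic_check(struct drm_connector *connector,
                                           struct drm_atomic_state *state)
  {
          struct foo_connector *foo_conn = to_foo_connector(connector);
          struct drm_connector_state *conn_state =
                  drm_atomic_get_new_connector_state(state, connector);

          /* Only physical DP connectors capable of driving MST sinks need this */
          if (!foo_conn->mst_capable)
                  return 0;

          return drm_dp_mst_root_conn_atomic_check(conn_state, &foo_conn->mst_mgr);
  }

  static const struct drm_connector_helper_funcs foo_dp_connector_helpers = {
          /* .get_modes, .mode_valid, ... */
          .atomic_check = foo_dp_connector_atomic_check,
  };

The real hooks added below differ only in how they look up the MST manager and decide whether MST is supported: nv50_has_mst() in nouveau, intel_dp_mst_source_support() in i915, and a connector type check in amdgpu.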
v2: * Use intel_dp_mst_source_support() to check for MST support in i915, fixes CI failures Signed-off-by: Lyude Paul Cc: Wayne Lin Cc: Ville Syrjälä Cc: Fangzhi Zuo Cc: Jani Nikula Cc: Imre Deak Cc: Daniel Vetter Cc: Sean Paul Acked-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220817193847.557945-14-lyude@redhat.com --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++ drivers/gpu/drm/display/drm_dp_mst_topology.c | 59 +++++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dp.c | 9 ++++ drivers/gpu/drm/nouveau/dispnv50/disp.c | 2 +- drivers/gpu/drm/nouveau/dispnv50/disp.h | 2 + drivers/gpu/drm/nouveau/nouveau_connector.c | 14 ++++++ include/drm/display/drm_dp_mst_helper.h | 2 + 7 files changed, 94 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c97a4d759b94..789748739d79 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6291,10 +6291,17 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn, drm_atomic_get_old_connector_state(state, conn); struct drm_crtc *crtc = new_con_state->crtc; struct drm_crtc_state *new_crtc_state; + struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); int ret; trace_amdgpu_dm_connector_atomic_check(new_con_state); + if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); + if (ret < 0) + return ret; + } + if (!crtc) return 0; diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 2f7c43f88d74..97e8f8a83ed4 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -4597,6 +4597,65 @@ void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state) } EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies); +/** + * drm_dp_mst_root_conn_atomic_check() - Serialize CRTC commits on MST-capable connectors operating + * in SST mode + * @new_conn_state: The new connector state of the &drm_connector + * @mgr: The MST topology manager for the &drm_connector + * + * Since MST uses fake &drm_encoder structs, the generic atomic modesetting code isn't able to + * serialize non-blocking commits happening on the real DP connector of an MST topology switching + * into/away from MST mode - as the CRTC on the real DP connector and the CRTCs on the connector's + * MST topology will never share the same &drm_encoder. + * + * This function takes care of this serialization issue, by checking a root MST connector's atomic + * state to determine if it is about to have a modeset - and then pulling in the MST topology state + * if so, along with adding any relevant CRTCs to &drm_dp_mst_topology_state.pending_crtc_mask. + * + * Drivers implementing MST must call this function from the + * &drm_connector_helper_funcs.atomic_check hook of any physical DP &drm_connector capable of + * driving MST sinks. 
+ * + * Returns: + * 0 on success, negative error code otherwise + */ +int drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state, + struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_atomic_state *state = new_conn_state->state; + struct drm_connector_state *old_conn_state = + drm_atomic_get_old_connector_state(state, new_conn_state->connector); + struct drm_crtc_state *crtc_state; + struct drm_dp_mst_topology_state *mst_state = NULL; + + if (new_conn_state->crtc) { + crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) { + mst_state = drm_atomic_get_mst_topology_state(state, mgr); + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + + mst_state->pending_crtc_mask |= drm_crtc_mask(new_conn_state->crtc); + } + } + + if (old_conn_state->crtc) { + crtc_state = drm_atomic_get_new_crtc_state(state, old_conn_state->crtc); + if (crtc_state && drm_atomic_crtc_needs_modeset(crtc_state)) { + if (!mst_state) { + mst_state = drm_atomic_get_mst_topology_state(state, mgr); + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + } + + mst_state->pending_crtc_mask |= drm_crtc_mask(old_conn_state->crtc); + } + } + + return 0; +} +EXPORT_SYMBOL(drm_dp_mst_root_conn_atomic_check); + /** * drm_dp_mst_update_slots() - updates the slot info depending on the DP ecoding format * @mst_state: mst_state to update diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 32292c0be2bd..a4e113253df3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4992,12 +4992,21 @@ static int intel_dp_connector_atomic_check(struct drm_connector *conn, { struct drm_i915_private *dev_priv = to_i915(conn->dev); struct intel_atomic_state *state = to_intel_atomic_state(_state); + struct drm_connector_state *conn_state = drm_atomic_get_new_connector_state(_state, conn); + struct intel_connector *intel_conn = to_intel_connector(conn); + struct intel_dp *intel_dp = enc_to_intel_dp(intel_conn->encoder); int ret; ret = intel_digital_connector_atomic_check(conn, &state->base); if (ret) return ret; + if (intel_dp_mst_source_support(intel_dp)) { + ret = drm_dp_mst_root_conn_atomic_check(conn_state, &intel_dp->mst_mgr); + if (ret) + return ret; + } + /* * We don't enable port sync on BDW due to missing w/as and * due to not having adjusted the modeset sequence appropriately. 
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 24807aa9da5f..7e9a0b50bb42 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -1813,7 +1813,7 @@ nv50_sor_func = { .destroy = nv50_sor_destroy, }; -static bool nv50_has_mst(struct nouveau_drm *drm) +bool nv50_has_mst(struct nouveau_drm *drm) { struct nvkm_bios *bios = nvxx_bios(&drm->client.device); u32 data; diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.h b/drivers/gpu/drm/nouveau/dispnv50/disp.h index 38dec11e7dda..9d66c9c726c3 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.h @@ -106,6 +106,8 @@ void nv50_dmac_destroy(struct nv50_dmac *); */ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder); +bool nv50_has_mst(struct nouveau_drm *drm); + u32 *evo_wait(struct nv50_dmac *, int nr); void evo_kick(u32 *, struct nv50_dmac *); diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index b8ee2173ca8f..1991bbb1d05c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -1106,11 +1106,25 @@ nouveau_connector_best_encoder(struct drm_connector *connector) return NULL; } +static int +nouveau_connector_atomic_check(struct drm_connector *connector, struct drm_atomic_state *state) +{ + struct nouveau_connector *nv_conn = nouveau_connector(connector); + struct drm_connector_state *conn_state = + drm_atomic_get_new_connector_state(state, connector); + + if (!nv_conn->dp_encoder || !nv50_has_mst(nouveau_drm(connector->dev))) + return 0; + + return drm_dp_mst_root_conn_atomic_check(conn_state, &nv_conn->dp_encoder->dp.mstm->mgr); +} + static const struct drm_connector_helper_funcs nouveau_connector_helper_funcs = { .get_modes = nouveau_connector_get_modes, .mode_valid = nouveau_connector_mode_valid, .best_encoder = nouveau_connector_best_encoder, + .atomic_check = nouveau_connector_atomic_check, }; static const struct drm_connector_funcs diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 0ef7d0e6cf0c..b9c361b242ea 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -911,6 +911,8 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, struct drm_dp_query_stream_enc_status_ack_reply *status); int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state); +int __must_check drm_dp_mst_root_conn_atomic_check(struct drm_connector_state *new_conn_state, + struct drm_dp_mst_topology_mgr *mgr); void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port); void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); -- cgit v1.2.3-59-g8ed1b From 4d07b0bc403403438d9cf88450506240c5faf92f Mon Sep 17 00:00:00 2001 From: Lyude Paul Date: Wed, 17 Aug 2022 15:38:46 -0400 Subject: drm/display/dp_mst: Move all payload info into the atomic state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we've finally gotten rid of the non-atomic MST users leftover in the kernel, we can finally get rid of all of the legacy payload code we have and move as much as possible into the MST atomic state structs. The main purpose of this is to make the MST code a lot less confusing to work on, as there's a lot of duplicated logic that doesn't really need to be here. 
As well, this should make introducing features like fallback link retraining and DSC support far easier. Since the old payload code was pretty gnarly and there are a lot of changes here, I expect this might be a bit difficult to review. So to make things as easy as possible for reviewers, I'll sum up how both the old and new code worked here (it took me a while to figure this out too!).

The old MST code basically worked by maintaining two different payload tables - proposed_vcpis and payloads. proposed_vcpis would hold the modified payload we wanted to push to the topology, while payloads held the payload table that was currently programmed in hardware. Modifications to proposed_vcpis would be handled through drm_dp_mst_allocate_vcpi(), drm_dp_mst_deallocate_vcpi(), and drm_dp_mst_reset_vcpi_slots(). Then, they would be pushed via drm_dp_update_payload_part1() and drm_dp_update_payload_part2().

Furthermore, it's important to note how adding and removing VC payloads actually worked with drm_dp_update_payload_part1(). When a VC payload is removed from the VC table, all VC payloads which come after the removed VC payload's slots must have their time slots shifted towards the start of the table. The old code handles this by looping through the entire payload table and recomputing the start slot for every payload in the topology from scratch. While very much overkill, this ends up doing the right thing because we always order the VCPIs for payloads from first to last starting timeslot.

It's important to also note that drm_dp_update_payload_part2() isn't actually limited to updating a single payload - the driver can use it to queue up multiple payload changes so that as many of them can be sent as possible before waiting for the ACT. This is -technically- not against spec, but as Wayne Lin has pointed out it's not consistently implemented correctly in hubs - so it might as well be. drm_dp_update_payload_part2() is pretty self-explanatory and basically the same between the old and new code, save for the fact that we don't have a second step for deleting payloads anymore - and thus rename it to drm_dp_add_payload_part2().

The new payload code stores all of the current payload info within the MST atomic state and computes as much of the state as possible ahead of time. This has the one exception of the starting timeslots for payloads, which can't be determined at atomic check time since the starting time slots will vary depending on what order CRTCs are enabled in the atomic state - which varies from driver to driver. These are still stored in the atomic MST state, but are only copied from the old MST state during atomic commit time. Likewise, this is when new start slots are determined.

Adding/removing payloads now works much more closely to how things are described in the spec. When we delete a payload, we loop through the current list of payloads and update the start slots for any payloads whose time slots came after the payload we just deleted. Determining the starting time slots for new payloads being added is done by simply keeping track of where the end of the VC table is in drm_dp_mst_topology_mgr->next_start_slot.

Additionally, it's worth noting that we no longer have a single update_payload() function. Instead, we now have drm_dp_add_payload_part1(), drm_dp_add_payload_part2(), and drm_dp_remove_payload(). As such, it's now left up to the driver to figure out when to add or remove payloads.
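For illustration only (not part of this patch): a rough sketch of what that driver-side commit flow can look like with the new helpers, following the same pattern as the amdgpu_dm_helpers.c changes below. The foo_* wrappers, their arguments, and the point at which the stream is enabled and ACT is handled are placeholders for driver-specific code.

  /* Enabling a stream: part 1 picks the start slot and writes the DPCD payload
   * table entry; after the driver has enabled the stream and handled ACT,
   * part 2 sends the ALLOCATE_PAYLOAD sideband message. Return values are
   * ignored here for brevity - if part 1 fails it sets payload->vc_start_slot
   * to -1 and part 2 will notice that and bail out.
   */
  static void foo_mst_enable_stream(struct drm_atomic_state *state,
                                    struct drm_dp_mst_topology_mgr *mgr,
                                    struct drm_dp_mst_port *port)
  {
          struct drm_dp_mst_topology_state *mst_state =
                  to_drm_dp_mst_topology_state(mgr->base.state);
          struct drm_dp_mst_atomic_payload *payload =
                  drm_atomic_get_mst_payload_state(mst_state, port);

          drm_dp_add_payload_part1(mgr, mst_state, payload);

          /* ...driver enables the stream and waits for ACT here... */

          drm_dp_add_payload_part2(mgr, state, payload);
  }

  /* Disabling a stream: a single call tears the payload down and shifts the
   * start slots of any payloads that came after it in the VC table.
   */
  static void foo_mst_disable_stream(struct drm_dp_mst_topology_mgr *mgr,
                                     struct drm_dp_mst_port *port)
  {
          struct drm_dp_mst_topology_state *mst_state =
                  to_drm_dp_mst_topology_state(mgr->base.state);
          struct drm_dp_mst_atomic_payload *payload =
                  drm_atomic_get_mst_payload_state(mst_state, port);

          drm_dp_remove_payload(mgr, mst_state, payload);
  }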
The driver already knows when it's disabling/enabling CRTCs, so it also already knows when payloads should be added or removed. Changes since v1: * Refactor around all of the completely dead code changes that are happening in amdgpu for some reason when they really shouldn't even be there in the first place… :\ * Remove mention of sending one ACT per series of payload updates. As Wayne Lin pointed out, there are apparently hubs on the market that don't work correctly with this scheme and require a separate ACT per payload update. * Fix accidental drop of mst_mgr.lock - Wayne Lin * Remove mentions of allowing multiple ACT updates per payload change, mention that this is a result of vendors not consistently supporting this part of the spec and requiring a unique ACT for each payload change. * Get rid of reference to drm_dp_mst_port in DC - turns out I just got myself confused by DC and we don't actually need this. Changes since v2: * Get rid of fix for not sending payload deallocations if ddps=0 and just go back to wayne's fix Signed-off-by: Lyude Paul Cc: Wayne Lin Cc: Ville Syrjälä Cc: Fangzhi Zuo Cc: Jani Nikula Cc: Imre Deak Cc: Daniel Vetter Cc: Sean Paul Acked-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220817193847.557945-18-lyude@redhat.com --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 56 +- .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 104 +-- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 84 +-- .../drm/amd/display/include/link_service_types.h | 3 + drivers/gpu/drm/display/drm_dp_mst_topology.c | 724 +++++++-------------- drivers/gpu/drm/i915/display/intel_dp_mst.c | 64 +- drivers/gpu/drm/i915/display/intel_hdcp.c | 24 +- drivers/gpu/drm/nouveau/dispnv50/disp.c | 163 +++-- include/drm/display/drm_dp_mst_helper.h | 177 +++-- 9 files changed, 525 insertions(+), 874 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 789748739d79..9ab01c58bedb 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6385,6 +6385,7 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; struct drm_dp_mst_topology_mgr *mst_mgr; struct drm_dp_mst_port *mst_port; + struct drm_dp_mst_topology_state *mst_state; enum dc_color_depth color_depth; int clock, bpp = 0; bool is_y420 = false; @@ -6398,6 +6399,13 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, if (!crtc_state->connectors_changed && !crtc_state->mode_changed) return 0; + mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr); + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + + if (!mst_state->pbn_div) + mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_port->dc_link); + if (!state->duplicated) { int max_bpc = conn_state->max_requested_bpc; is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && @@ -6409,11 +6417,10 @@ static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, clock = adjusted_mode->clock; dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp, false); } - dm_new_connector_state->vcpi_slots = drm_dp_atomic_find_time_slots(state, - mst_mgr, - mst_port, - dm_new_connector_state->pbn, - dm_mst_get_pbn_divider(aconnector->dc_link)); + + dm_new_connector_state->vcpi_slots = + 
drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, + dm_new_connector_state->pbn); if (dm_new_connector_state->vcpi_slots < 0) { DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); return dm_new_connector_state->vcpi_slots; @@ -6483,18 +6490,12 @@ static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, dm_conn_state->pbn = pbn; dm_conn_state->vcpi_slots = slot_num; - drm_dp_mst_atomic_enable_dsc(state, - aconnector->port, - dm_conn_state->pbn, - 0, + drm_dp_mst_atomic_enable_dsc(state, aconnector->port, dm_conn_state->pbn, false); continue; } - vcpi = drm_dp_mst_atomic_enable_dsc(state, - aconnector->port, - pbn, pbn_div, - true); + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->port, pbn, true); if (vcpi < 0) return vcpi; @@ -9336,8 +9337,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; #if defined(CONFIG_DRM_AMD_DC_DCN) struct dsc_mst_fairness_vars vars[MAX_PIPES]; - struct drm_dp_mst_topology_state *mst_state; - struct drm_dp_mst_topology_mgr *mgr; #endif trace_amdgpu_dm_atomic_check_begin(state); @@ -9576,33 +9575,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, lock_and_validation_needed = true; } -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* set the slot info for each mst_state based on the link encoding format */ - for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { - struct amdgpu_dm_connector *aconnector; - struct drm_connector *connector; - struct drm_connector_list_iter iter; - u8 link_coding_cap; - - if (!mgr->mst_state ) - continue; - - drm_connector_list_iter_begin(dev, &iter); - drm_for_each_connector_iter(connector, &iter) { - int id = connector->index; - - if (id == mst_state->mgr->conn_base_id) { - aconnector = to_amdgpu_dm_connector(connector); - link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); - drm_dp_mst_update_slots(mst_state, link_coding_cap); - - break; - } - } - drm_connector_list_iter_end(&iter); - - } -#endif /** * Streams and planes are reset when there are changes that affect * bandwidth. 
Anything that affects bandwidth needs to go through diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 0177a275b4df..988b9bf34c93 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -27,6 +27,7 @@ #include #include +#include #include #include #include @@ -154,40 +155,27 @@ enum dc_edid_status dm_helpers_parse_edid_caps( } static void -fill_dc_mst_payload_table_from_drm(struct amdgpu_dm_connector *aconnector, - struct dc_dp_mst_stream_allocation_table *proposed_table) +fill_dc_mst_payload_table_from_drm(struct drm_dp_mst_topology_state *mst_state, + struct amdgpu_dm_connector *aconnector, + struct dc_dp_mst_stream_allocation_table *table) { - int i; - struct drm_dp_mst_topology_mgr *mst_mgr = - &aconnector->mst_port->mst_mgr; - - mutex_lock(&mst_mgr->payload_lock); - - proposed_table->stream_count = 0; - - /* number of active streams */ - for (i = 0; i < mst_mgr->max_payloads; i++) { - if (mst_mgr->payloads[i].num_slots == 0) - break; /* end of vcp_id table */ - - ASSERT(mst_mgr->payloads[i].payload_state != - DP_PAYLOAD_DELETE_LOCAL); - - if (mst_mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL || - mst_mgr->payloads[i].payload_state == - DP_PAYLOAD_REMOTE) { - - struct dc_dp_mst_stream_allocation *sa = - &proposed_table->stream_allocations[ - proposed_table->stream_count]; - - sa->slot_count = mst_mgr->payloads[i].num_slots; - sa->vcp_id = mst_mgr->proposed_vcpis[i]->vcpi; - proposed_table->stream_count++; - } + struct dc_dp_mst_stream_allocation_table new_table = { 0 }; + struct dc_dp_mst_stream_allocation *sa; + struct drm_dp_mst_atomic_payload *payload; + + /* Fill payload info*/ + list_for_each_entry(payload, &mst_state->payloads, next) { + if (payload->delete) + continue; + + sa = &new_table.stream_allocations[new_table.stream_count]; + sa->slot_count = payload->time_slots; + sa->vcp_id = payload->vcpi; + new_table.stream_count++; } - mutex_unlock(&mst_mgr->payload_lock); + /* Overwrite the old table */ + *table = new_table; } void dm_helpers_dp_update_branch_info( @@ -205,11 +193,9 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( bool enable) { struct amdgpu_dm_connector *aconnector; - struct dm_connector_state *dm_conn_state; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_atomic_payload *payload; struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_port *mst_port; - bool ret; - u8 link_coding_cap = DP_8b_10b_ENCODING; aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; /* Accessing the connector state is required for vcpi_slots allocation @@ -220,40 +206,21 @@ bool dm_helpers_dp_mst_write_payload_allocation_table( if (!aconnector || !aconnector->mst_port) return false; - dm_conn_state = to_dm_connector_state(aconnector->base.state); - mst_mgr = &aconnector->mst_port->mst_mgr; - - if (!mst_mgr->mst_state) - return false; - - mst_port = aconnector->port; - -#if defined(CONFIG_DRM_AMD_DC_DCN) - link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); -#endif - - if (enable) { - - ret = drm_dp_mst_allocate_vcpi(mst_mgr, mst_port, - dm_conn_state->pbn, - dm_conn_state->vcpi_slots); - if (!ret) - return false; - - } else { - drm_dp_mst_reset_vcpi_slots(mst_mgr, mst_port); - } + mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); /* It's OK for this to fail */ - drm_dp_update_payload_part1(mst_mgr, (link_coding_cap == 
DP_CAP_ANSI_128B132B) ? 0:1); + payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); + if (enable) + drm_dp_add_payload_part1(mst_mgr, mst_state, payload); + else + drm_dp_remove_payload(mst_mgr, mst_state, payload); /* mst_mgr->->payloads are VC payload notify MST branch using DPCD or * AUX message. The sequence is slot 1-63 allocated sequence for each * stream. AMD ASIC stream slot allocation should follow the same * sequence. copy DRM MST allocation to dc */ - - fill_dc_mst_payload_table_from_drm(aconnector, proposed_table); + fill_dc_mst_payload_table_from_drm(mst_state, aconnector, proposed_table); return true; } @@ -310,8 +277,9 @@ bool dm_helpers_dp_mst_send_payload_allocation( bool enable) { struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_topology_mgr *mst_mgr; - struct drm_dp_mst_port *mst_port; + struct drm_dp_mst_atomic_payload *payload; enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; @@ -320,19 +288,16 @@ bool dm_helpers_dp_mst_send_payload_allocation( if (!aconnector || !aconnector->mst_port) return false; - mst_port = aconnector->port; - mst_mgr = &aconnector->mst_port->mst_mgr; + mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); - if (!mst_mgr->mst_state) - return false; - + payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->port); if (!enable) { set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; clr_flag = MST_ALLOCATE_NEW_PAYLOAD; } - if (drm_dp_update_payload_part2(mst_mgr)) { + if (enable && drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload)) { amdgpu_dm_set_mst_status(&aconnector->mst_status, set_flag, false); } else { @@ -342,9 +307,6 @@ bool dm_helpers_dp_mst_send_payload_allocation( clr_flag, false); } - if (!enable) - drm_dp_mst_deallocate_vcpi(mst_mgr, mst_port); - return true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 7a0d6cfa77f5..bd9606307dc7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -597,15 +597,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap); aconnector->mst_mgr.cbs = &dm_mst_cbs; - drm_dp_mst_topology_mgr_init( - &aconnector->mst_mgr, - adev_to_drm(dm->adev), - &aconnector->dm_dp_aux.aux, - 16, - 4, - max_link_enc_cap.lane_count, - drm_dp_bw_code_to_link_rate(max_link_enc_cap.link_rate), - aconnector->connector_id); + drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev), + &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id); drm_connector_attach_dp_subconnector_property(&aconnector->base); } @@ -710,6 +703,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) } static bool increase_dsc_bpp(struct drm_atomic_state *state, + struct drm_dp_mst_topology_state *mst_state, struct dc_link *dc_link, struct dsc_mst_fairness_params *params, struct dsc_mst_fairness_vars *vars, @@ -722,12 +716,9 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, int min_initial_slack; int next_index; int remaining_to_increase = 0; - int pbn_per_timeslot; int link_timeslots_used; int fair_pbn_alloc; - pbn_per_timeslot = dm_mst_get_pbn_divider(dc_link); - for (i = 0; i < count; i++) { if (vars[i + k].dsc_enabled) { initial_slack[i] = @@ 
-758,17 +749,17 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, link_timeslots_used = 0; for (i = 0; i < count; i++) - link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, pbn_per_timeslot); + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); - fair_pbn_alloc = (63 - link_timeslots_used) / remaining_to_increase * pbn_per_timeslot; + fair_pbn_alloc = + (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; if (initial_slack[next_index] > fair_pbn_alloc) { vars[next_index].pbn += fair_pbn_alloc; if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - pbn_per_timeslot) < 0) + vars[next_index].pbn) < 0) return false; if (!drm_dp_mst_atomic_check(state)) { vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); @@ -777,8 +768,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - pbn_per_timeslot) < 0) + vars[next_index].pbn) < 0) return false; } } else { @@ -786,8 +776,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - pbn_per_timeslot) < 0) + vars[next_index].pbn) < 0) return false; if (!drm_dp_mst_atomic_check(state)) { vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; @@ -796,8 +785,7 @@ static bool increase_dsc_bpp(struct drm_atomic_state *state, if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - pbn_per_timeslot) < 0) + vars[next_index].pbn) < 0) return false; } } @@ -854,8 +842,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state, if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) + vars[next_index].pbn) < 0) return false; if (!drm_dp_mst_atomic_check(state)) { @@ -866,8 +853,7 @@ static bool try_disable_dsc(struct drm_atomic_state *state, if (drm_dp_atomic_find_time_slots(state, params[next_index].port->mgr, params[next_index].port, - vars[next_index].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) + vars[next_index].pbn) < 0) return false; } @@ -881,17 +867,27 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, struct dc_state *dc_state, struct dc_link *dc_link, struct dsc_mst_fairness_vars *vars, + struct drm_dp_mst_topology_mgr *mgr, int *link_vars_start_index) { - int i, k; struct dc_stream_state *stream; struct dsc_mst_fairness_params params[MAX_PIPES]; struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr); int count = 0; + int i, k; bool debugfs_overwrite = false; memset(params, 0, sizeof(params)); + if (IS_ERR(mst_state)) + return false; + + mst_state->pbn_div = dm_mst_get_pbn_divider(dc_link); +#if defined(CONFIG_DRM_AMD_DC_DCN) + drm_dp_mst_update_slots(mst_state, dc_link_dp_mst_decide_link_encoding_format(dc_link)); +#endif + /* Set up params */ for (i = 0; i < dc_state->stream_count; i++) { struct dc_dsc_policy dsc_policy = {0}; @@ -950,11 +946,8 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 
= 0; - if (drm_dp_atomic_find_time_slots(state, - params[i].port->mgr, - params[i].port, - vars[i + k].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) + if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, + vars[i + k].pbn) < 0) return false; } if (!drm_dp_mst_atomic_check(state) && !debugfs_overwrite) { @@ -968,21 +961,15 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps); vars[i + k].dsc_enabled = true; vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; - if (drm_dp_atomic_find_time_slots(state, - params[i].port->mgr, - params[i].port, - vars[i + k].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) + if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn) < 0) return false; } else { vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps); vars[i + k].dsc_enabled = false; vars[i + k].bpp_x16 = 0; - if (drm_dp_atomic_find_time_slots(state, - params[i].port->mgr, - params[i].port, - vars[i + k].pbn, - dm_mst_get_pbn_divider(dc_link)) < 0) + if (drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn) < 0) return false; } } @@ -990,7 +977,7 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, return false; /* Optimize degree of compression */ - if (!increase_dsc_bpp(state, dc_link, params, vars, count, k)) + if (!increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k)) return false; if (!try_disable_dsc(state, dc_link, params, vars, count, k)) @@ -1136,8 +1123,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, continue; mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, - vars, &link_vars_start_index)) { + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, + &aconnector->mst_mgr, + &link_vars_start_index)) { mutex_unlock(&aconnector->mst_mgr.lock); return false; } @@ -1195,10 +1183,8 @@ static bool continue; mutex_lock(&aconnector->mst_mgr.lock); - if (!compute_mst_dsc_configs_for_link(state, - dc_state, - stream->link, - vars, + if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, + &aconnector->mst_mgr, &link_vars_start_index)) { mutex_unlock(&aconnector->mst_mgr.lock); return false; diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index f75ed6f8fcb8..d76ab72baf0c 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -251,6 +251,9 @@ union dpcd_training_lane_set { * _ONLY_ be filled out from DM and then passed to DC, do NOT use these for _any_ kind of atomic * state calculations in DM, or you will break something. 
*/ + +struct drm_dp_mst_port; + /* DP MST stream allocation (payload bandwidth number) */ struct dc_dp_mst_stream_allocation { uint8_t vcp_id; diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index c4073d733c59..1de438151cc3 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -68,8 +68,7 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port); static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, - int id, - struct drm_dp_payload *payload); + int id, u8 start_slot, u8 num_slots); static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, @@ -1235,57 +1234,6 @@ build_query_stream_enc_status(struct drm_dp_sideband_msg_tx *msg, u8 stream_id, return 0; } -static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_vcpi *vcpi) -{ - int ret, vcpi_ret; - - mutex_lock(&mgr->payload_lock); - ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); - if (ret > mgr->max_payloads) { - ret = -EINVAL; - drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret); - goto out_unlock; - } - - vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1); - if (vcpi_ret > mgr->max_payloads) { - ret = -EINVAL; - drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret); - goto out_unlock; - } - - set_bit(ret, &mgr->payload_mask); - set_bit(vcpi_ret, &mgr->vcpi_mask); - vcpi->vcpi = vcpi_ret + 1; - mgr->proposed_vcpis[ret - 1] = vcpi; -out_unlock: - mutex_unlock(&mgr->payload_lock); - return ret; -} - -static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, - int vcpi) -{ - int i; - - if (vcpi == 0) - return; - - mutex_lock(&mgr->payload_lock); - drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi); - clear_bit(vcpi - 1, &mgr->vcpi_mask); - - for (i = 0; i < mgr->max_payloads; i++) { - if (mgr->proposed_vcpis[i] && - mgr->proposed_vcpis[i]->vcpi == vcpi) { - mgr->proposed_vcpis[i] = NULL; - clear_bit(i + 1, &mgr->payload_mask); - } - } - mutex_unlock(&mgr->payload_lock); -} - static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg) { @@ -1738,7 +1686,7 @@ drm_dp_mst_dump_port_topology_history(struct drm_dp_mst_port *port) {} #define save_port_topology_ref(port, type) #endif -static struct drm_dp_mst_atomic_payload * +struct drm_dp_mst_atomic_payload * drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state, struct drm_dp_mst_port *port) { @@ -1750,6 +1698,7 @@ drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state, return NULL; } +EXPORT_SYMBOL(drm_atomic_get_mst_payload_state); static void drm_dp_destroy_mst_branch_device(struct kref *kref) { @@ -3252,6 +3201,8 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, struct drm_dp_query_stream_enc_status_ack_reply *status) { + struct drm_dp_mst_topology_state *state; + struct drm_dp_mst_atomic_payload *payload; struct drm_dp_sideband_msg_tx *txmsg; u8 nonce[7]; int ret; @@ -3268,6 +3219,10 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, get_random_bytes(nonce, sizeof(nonce)); + drm_modeset_lock(&mgr->base.lock, NULL); + state = to_drm_dp_mst_topology_state(mgr->base.state); + payload = drm_atomic_get_mst_payload_state(state, port); + /* * "Source device targets the 
QUERY_STREAM_ENCRYPTION_STATUS message * transaction at the MST Branch device directly connected to the @@ -3275,7 +3230,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, */ txmsg->dst = mgr->mst_primary; - build_query_stream_enc_status(txmsg, port->vcpi.vcpi, nonce); + build_query_stream_enc_status(txmsg, payload->vcpi, nonce); drm_dp_queue_down_tx(mgr, txmsg); @@ -3292,6 +3247,7 @@ int drm_dp_send_query_stream_enc_status(struct drm_dp_mst_topology_mgr *mgr, memcpy(status, &txmsg->reply.u.enc_status, sizeof(*status)); out: + drm_modeset_unlock(&mgr->base.lock); drm_dp_mst_topology_put_port(port); out_get_port: kfree(txmsg); @@ -3300,238 +3256,162 @@ out_get_port: EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status); static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, - int id, - struct drm_dp_payload *payload) + struct drm_dp_mst_atomic_payload *payload) { - int ret; - - ret = drm_dp_dpcd_write_payload(mgr, id, payload); - if (ret < 0) { - payload->payload_state = 0; - return ret; - } - payload->payload_state = DP_PAYLOAD_LOCAL; - return 0; + return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, + payload->time_slots); } static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, - int id, - struct drm_dp_payload *payload) + struct drm_dp_mst_atomic_payload *payload) { int ret; + struct drm_dp_mst_port *port = drm_dp_mst_topology_get_port_validated(mgr, payload->port); - ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); - if (ret < 0) - return ret; - payload->payload_state = DP_PAYLOAD_REMOTE; + if (!port) + return -EIO; + + ret = drm_dp_payload_send_msg(mgr, port, payload->vcpi, payload->pbn); + drm_dp_mst_topology_put_port(port); return ret; } static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, - int id, - struct drm_dp_payload *payload) + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload) { drm_dbg_kms(mgr->dev, "\n"); - /* it's okay for these to fail */ - if (port) { - drm_dp_payload_send_msg(mgr, port, id, 0); - } - drm_dp_dpcd_write_payload(mgr, id, payload); - payload->payload_state = DP_PAYLOAD_DELETE_LOCAL; - return 0; -} + /* it's okay for these to fail */ + drm_dp_payload_send_msg(mgr, payload->port, payload->vcpi, 0); + drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0); -static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, - int id, - struct drm_dp_payload *payload) -{ - payload->payload_state = 0; return 0; } /** - * drm_dp_update_payload_part1() - Execute payload update part 1 - * @mgr: manager to use. - * @start_slot: this is the cur slot - * - * NOTE: start_slot is a temporary workaround for non-atomic drivers, - * this will be removed when non-atomic mst helpers are moved out of the helper + * drm_dp_add_payload_part1() - Execute payload update part 1 + * @mgr: Manager to use. + * @mst_state: The MST atomic state + * @payload: The payload to write * - * This iterates over all proposed virtual channels, and tries to - * allocate space in the link for them. For 0->slots transitions, - * this step just writes the VCPI to the MST device. For slots->0 - * transitions, this writes the updated VCPIs and removes the - * remote VC payloads. + * Determines the starting time slot for the given payload, and programs the VCPI for this payload + * into hardware. 
After calling this, the driver should generate ACT and payload packets. * - * after calling this the driver should generate ACT and payload - * packets. + * Returns: 0 on success, error code on failure. In the event that this fails, + * @payload.vc_start_slot will also be set to -1. */ -int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot) +int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload) { - struct drm_dp_payload req_payload; struct drm_dp_mst_port *port; - int i, j; - int cur_slots = start_slot; - bool skip; + int ret; - mutex_lock(&mgr->payload_lock); - for (i = 0; i < mgr->max_payloads; i++) { - struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i]; - struct drm_dp_payload *payload = &mgr->payloads[i]; - bool put_port = false; + port = drm_dp_mst_topology_get_port_validated(mgr, payload->port); + if (!port) + return 0; - /* solve the current payloads - compare to the hw ones - - update the hw view */ - req_payload.start_slot = cur_slots; - if (vcpi) { - port = container_of(vcpi, struct drm_dp_mst_port, - vcpi); + if (mgr->payload_count == 0) + mgr->next_start_slot = mst_state->start_slot; - mutex_lock(&mgr->lock); - skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary); - mutex_unlock(&mgr->lock); + payload->vc_start_slot = mgr->next_start_slot; - if (skip) { - drm_dbg_kms(mgr->dev, - "Virtual channel %d is not in current topology\n", - i); - continue; - } - /* Validated ports don't matter if we're releasing - * VCPI - */ - if (vcpi->num_slots) { - port = drm_dp_mst_topology_get_port_validated( - mgr, port); - if (!port) { - if (vcpi->num_slots == payload->num_slots) { - cur_slots += vcpi->num_slots; - payload->start_slot = req_payload.start_slot; - continue; - } else { - drm_dbg_kms(mgr->dev, - "Fail:set payload to invalid sink"); - mutex_unlock(&mgr->payload_lock); - return -EINVAL; - } - } - put_port = true; - } + ret = drm_dp_create_payload_step1(mgr, payload); + drm_dp_mst_topology_put_port(port); + if (ret < 0) { + drm_warn(mgr->dev, "Failed to create MST payload for port %p: %d\n", + payload->port, ret); + payload->vc_start_slot = -1; + return ret; + } - req_payload.num_slots = vcpi->num_slots; - req_payload.vcpi = vcpi->vcpi; - } else { - port = NULL; - req_payload.num_slots = 0; - } + mgr->payload_count++; + mgr->next_start_slot += payload->time_slots; - payload->start_slot = req_payload.start_slot; - /* work out what is required to happen with this payload */ - if (payload->num_slots != req_payload.num_slots) { - - /* need to push an update for this payload */ - if (req_payload.num_slots) { - drm_dp_create_payload_step1(mgr, vcpi->vcpi, - &req_payload); - payload->num_slots = req_payload.num_slots; - payload->vcpi = req_payload.vcpi; - - } else if (payload->num_slots) { - payload->num_slots = 0; - drm_dp_destroy_payload_step1(mgr, port, - payload->vcpi, - payload); - req_payload.payload_state = - payload->payload_state; - payload->start_slot = 0; - } - payload->payload_state = req_payload.payload_state; - } - cur_slots += req_payload.num_slots; + return 0; +} +EXPORT_SYMBOL(drm_dp_add_payload_part1); - if (put_port) - drm_dp_mst_topology_put_port(port); - } +/** + * drm_dp_remove_payload() - Remove an MST payload + * @mgr: Manager to use. + * @mst_state: The MST atomic state + * @payload: The payload to write + * + * Removes a payload from an MST topology if it was successfully assigned a start slot. 
Also updates + * the starting time slots of all other payloads which would have been shifted towards the start of + * the VC table as a result. After calling this, the driver should generate ACT and payload packets. + */ +void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload) +{ + struct drm_dp_mst_atomic_payload *pos; + bool send_remove = false; - for (i = 0; i < mgr->max_payloads; /* do nothing */) { - if (mgr->payloads[i].payload_state != DP_PAYLOAD_DELETE_LOCAL) { - i++; - continue; - } + /* We failed to make the payload, so nothing to do */ + if (payload->vc_start_slot == -1) + return; - drm_dbg_kms(mgr->dev, "removing payload %d\n", i); - for (j = i; j < mgr->max_payloads - 1; j++) { - mgr->payloads[j] = mgr->payloads[j + 1]; - mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; + mutex_lock(&mgr->lock); + send_remove = drm_dp_mst_port_downstream_of_branch(payload->port, mgr->mst_primary); + mutex_unlock(&mgr->lock); - if (mgr->proposed_vcpis[j] && - mgr->proposed_vcpis[j]->num_slots) { - set_bit(j + 1, &mgr->payload_mask); - } else { - clear_bit(j + 1, &mgr->payload_mask); - } - } + if (send_remove) + drm_dp_destroy_payload_step1(mgr, mst_state, payload); + else + drm_dbg_kms(mgr->dev, "Payload for VCPI %d not in topology, not sending remove\n", + payload->vcpi); - memset(&mgr->payloads[mgr->max_payloads - 1], 0, - sizeof(struct drm_dp_payload)); - mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; - clear_bit(mgr->max_payloads, &mgr->payload_mask); + list_for_each_entry(pos, &mst_state->payloads, next) { + if (pos != payload && pos->vc_start_slot > payload->vc_start_slot) + pos->vc_start_slot -= payload->time_slots; } - mutex_unlock(&mgr->payload_lock); + payload->vc_start_slot = -1; - return 0; + mgr->payload_count--; + mgr->next_start_slot -= payload->time_slots; } -EXPORT_SYMBOL(drm_dp_update_payload_part1); +EXPORT_SYMBOL(drm_dp_remove_payload); /** - * drm_dp_update_payload_part2() - Execute payload update part 2 - * @mgr: manager to use. + * drm_dp_add_payload_part2() - Execute payload update part 2 + * @mgr: Manager to use. + * @state: The global atomic state + * @payload: The payload to update + * + * If @payload was successfully assigned a starting time slot by drm_dp_add_payload_part1(), this + * function will send the sideband messages to finish allocating this payload. * - * This iterates over all proposed virtual channels, and tries to - * allocate space in the link for them. For 0->slots transitions, - * this step writes the remote VC payload commands. For slots->0 - * this just resets some internal state. + * Returns: 0 on success, negative error code on failure. 
*/ -int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) +int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, + struct drm_atomic_state *state, + struct drm_dp_mst_atomic_payload *payload) { - struct drm_dp_mst_port *port; - int i; int ret = 0; - bool skip; - - mutex_lock(&mgr->payload_lock); - for (i = 0; i < mgr->max_payloads; i++) { - - if (!mgr->proposed_vcpis[i]) - continue; - - port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); - mutex_lock(&mgr->lock); - skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary); - mutex_unlock(&mgr->lock); - - if (skip) - continue; + /* Skip failed payloads */ + if (payload->vc_start_slot == -1) { + drm_dbg_kms(state->dev, "Part 1 of payload creation for %s failed, skipping part 2\n", + payload->port->connector->name); + return -EIO; + } - drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state); - if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { - ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); - } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { - ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); - } - if (ret) { - mutex_unlock(&mgr->payload_lock); - return ret; - } + ret = drm_dp_create_payload_step2(mgr, payload); + if (ret < 0) { + if (!payload->delete) + drm_err(mgr->dev, "Step 2 of creating MST payload for %p failed: %d\n", + payload->port, ret); + else + drm_dbg_kms(mgr->dev, "Step 2 of removing MST payload for %p failed: %d\n", + payload->port, ret); } - mutex_unlock(&mgr->payload_lock); - return 0; + + return ret; } -EXPORT_SYMBOL(drm_dp_update_payload_part2); +EXPORT_SYMBOL(drm_dp_add_payload_part2); static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, @@ -3711,7 +3591,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms int ret = 0; struct drm_dp_mst_branch *mstb = NULL; - mutex_lock(&mgr->payload_lock); mutex_lock(&mgr->lock); if (mst_state == mgr->mst_state) goto out_unlock; @@ -3719,10 +3598,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mgr->mst_state = mst_state; /* set the device into MST mode */ if (mst_state) { - struct drm_dp_payload reset_pay; - int lane_count; - int link_rate; - WARN_ON(mgr->mst_primary); /* get dpcd info */ @@ -3733,16 +3608,6 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms goto out_unlock; } - lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count); - link_rate = min_t(int, drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->max_link_rate); - mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr, - link_rate, - lane_count); - if (mgr->pbn_div == 0) { - ret = -EINVAL; - goto out_unlock; - } - /* add initial branch device at LCT 1 */ mstb = drm_dp_add_mst_branch_device(1, NULL); if (mstb == NULL) { @@ -3762,9 +3627,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms if (ret < 0) goto out_unlock; - reset_pay.start_slot = 0; - reset_pay.num_slots = 0x3f; - drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); + /* Write reset payload */ + drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f); queue_work(system_long_wq, &mgr->work); @@ -3776,19 +3640,11 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms /* this can fail if the device is gone */ drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); ret = 0; - 
memset(mgr->payloads, 0, - mgr->max_payloads * sizeof(mgr->payloads[0])); - memset(mgr->proposed_vcpis, 0, - mgr->max_payloads * sizeof(mgr->proposed_vcpis[0])); - mgr->payload_mask = 0; - set_bit(0, &mgr->payload_mask); - mgr->vcpi_mask = 0; mgr->payload_id_table_cleared = false; } out_unlock: mutex_unlock(&mgr->lock); - mutex_unlock(&mgr->payload_lock); if (mstb) drm_dp_mst_topology_put_mstb(mstb); return ret; @@ -4307,62 +4163,18 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_ } EXPORT_SYMBOL(drm_dp_mst_get_edid); -/** - * drm_dp_find_vcpi_slots() - Find time slots for this PBN value - * @mgr: manager to use - * @pbn: payload bandwidth to convert into slots. - * - * Calculate the number of time slots that will be required for the given PBN - * value. This function is deprecated, and should not be used in atomic - * drivers. - * - * RETURNS: - * The total slots required for this port, or error. - */ -int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, - int pbn) -{ - int num_slots; - - num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); - - /* max. time slots - one slot for MTP header */ - if (num_slots > 63) - return -ENOSPC; - return num_slots; -} -EXPORT_SYMBOL(drm_dp_find_vcpi_slots); - -static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_vcpi *vcpi, int pbn, int slots) -{ - int ret; - - vcpi->pbn = pbn; - vcpi->aligned_pbn = slots * mgr->pbn_div; - vcpi->num_slots = slots; - - ret = drm_dp_mst_assign_payload_id(mgr, vcpi); - if (ret < 0) - return ret; - return 0; -} - /** * drm_dp_atomic_find_time_slots() - Find and add time slots to the state * @state: global atomic state * @mgr: MST topology manager for the port * @port: port to find time slots for * @pbn: bandwidth required for the mode in PBN - * @pbn_div: divider for DSC mode that takes FEC into account * - * Allocates time slots to @port, replacing any previous timeslot allocations it - * may have had. Any atomic drivers which support MST must call this function - * in their &drm_encoder_helper_funcs.atomic_check() callback to change the - * current timeslot allocation for the new state, but only when - * &drm_crtc_state.mode_changed or &drm_crtc_state.connectors_changed is set - * to ensure compatibility with userspace applications that still use the - * legacy modesetting UAPI. + * Allocates time slots to @port, replacing any previous time slot allocations it may + * have had. Any atomic drivers which support MST must call this function in + * their &drm_encoder_helper_funcs.atomic_check() callback unconditionally to + * change the current time slot allocation for the new state, and ensure the MST + * atomic state is added whenever the state of payloads in the topology changes. * * Allocations set by this function are not checked against the bandwidth * restraints of @mgr until the driver calls drm_dp_mst_atomic_check(). 
@@ -4381,8 +4193,7 @@ static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, */ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, int pbn, - int pbn_div) + struct drm_dp_mst_port *port, int pbn) { struct drm_dp_mst_topology_state *topology_state; struct drm_dp_mst_atomic_payload *payload = NULL; @@ -4415,10 +4226,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, } } - if (pbn_div <= 0) - pbn_div = mgr->pbn_div; - - req_slots = DIV_ROUND_UP(pbn, pbn_div); + req_slots = DIV_ROUND_UP(pbn, topology_state->pbn_div); drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] TU %d -> %d\n", port->connector->base.id, port->connector->name, @@ -4427,7 +4235,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, port->connector->base.id, port->connector->name, port, prev_bw, pbn); - /* Add the new allocation to the state */ + /* Add the new allocation to the state, note the VCPI isn't assigned until the end */ if (!payload) { payload = kzalloc(sizeof(*payload), GFP_KERNEL); if (!payload) @@ -4435,6 +4243,7 @@ int drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, drm_dp_mst_get_port_malloc(port); payload->port = port; + payload->vc_start_slot = -1; list_add(&payload->next, &topology_state->payloads); } payload->time_slots = req_slots; @@ -4451,10 +4260,12 @@ EXPORT_SYMBOL(drm_dp_atomic_find_time_slots); * @port: The port to release the time slots from * * Releases any time slots that have been allocated to a port in the atomic - * state. Any atomic drivers which support MST must call this function in - * their &drm_connector_helper_funcs.atomic_check() callback when the - * connector will no longer have VCPI allocated (e.g. because its CRTC was - * removed) when it had VCPI allocated in the previous atomic state. + * state. Any atomic drivers which support MST must call this function + * unconditionally in their &drm_connector_helper_funcs.atomic_check() callback. + * This helper will check whether time slots would be released by the new state and + * respond accordingly, along with ensuring the MST state is always added to the + * atomic state whenever a new state would modify the state of payloads on the + * topology. * * It is OK to call this even if @port has been removed from the system. 
* Additionally, it is OK to call this function multiple times on the same @@ -4516,6 +4327,7 @@ int drm_dp_atomic_release_time_slots(struct drm_atomic_state *state, drm_dp_mst_put_port_malloc(port); payload->pbn = 0; payload->delete = true; + topology_state->payload_mask &= ~BIT(payload->vcpi - 1); } return 0; @@ -4566,7 +4378,8 @@ int drm_dp_mst_atomic_setup_commit(struct drm_atomic_state *state) EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit); /** - * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies + * drm_dp_mst_atomic_wait_for_dependencies() - Wait for all pending commits on MST topologies, + * prepare new MST state for commit * @state: global atomic state * * Goes through any MST topologies in this atomic state, and waits for any pending commits which @@ -4584,17 +4397,30 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_setup_commit); */ void drm_dp_mst_atomic_wait_for_dependencies(struct drm_atomic_state *state) { - struct drm_dp_mst_topology_state *old_mst_state; + struct drm_dp_mst_topology_state *old_mst_state, *new_mst_state; struct drm_dp_mst_topology_mgr *mgr; + struct drm_dp_mst_atomic_payload *old_payload, *new_payload; int i, j, ret; - for_each_old_mst_mgr_in_state(state, mgr, old_mst_state, i) { + for_each_oldnew_mst_mgr_in_state(state, mgr, old_mst_state, new_mst_state, i) { for (j = 0; j < old_mst_state->num_commit_deps; j++) { ret = drm_crtc_commit_wait(old_mst_state->commit_deps[j]); if (ret < 0) drm_err(state->dev, "Failed to wait for %s: %d\n", old_mst_state->commit_deps[j]->crtc->name, ret); } + + /* Now that previous state is committed, it's safe to copy over the start slot + * assignments + */ + list_for_each_entry(old_payload, &old_mst_state->payloads, next) { + if (old_payload->delete) + continue; + + new_payload = drm_atomic_get_mst_payload_state(new_mst_state, + old_payload->port); + new_payload->vc_start_slot = old_payload->vc_start_slot; + } } } EXPORT_SYMBOL(drm_dp_mst_atomic_wait_for_dependencies); @@ -4679,119 +4505,8 @@ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_ } EXPORT_SYMBOL(drm_dp_mst_update_slots); -/** - * drm_dp_mst_allocate_vcpi() - Allocate a virtual channel - * @mgr: manager for this port - * @port: port to allocate a virtual channel for. - * @pbn: payload bandwidth number to request - * @slots: returned number of slots for this PBN. 
- */ -bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, int pbn, int slots) -{ - int ret; - - if (slots < 0) - return false; - - port = drm_dp_mst_topology_get_port_validated(mgr, port); - if (!port) - return false; - - if (port->vcpi.vcpi > 0) { - drm_dbg_kms(mgr->dev, - "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n", - port->vcpi.vcpi, port->vcpi.pbn, pbn); - if (pbn == port->vcpi.pbn) { - drm_dp_mst_topology_put_port(port); - return true; - } - } - - ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots); - if (ret) { - drm_dbg_kms(mgr->dev, "failed to init time slots=%d ret=%d\n", - DIV_ROUND_UP(pbn, mgr->pbn_div), ret); - drm_dp_mst_topology_put_port(port); - goto out; - } - drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots); - - /* Keep port allocated until its payload has been removed */ - drm_dp_mst_get_port_malloc(port); - drm_dp_mst_topology_put_port(port); - return true; -out: - return false; -} -EXPORT_SYMBOL(drm_dp_mst_allocate_vcpi); - -int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) -{ - int slots = 0; - - port = drm_dp_mst_topology_get_port_validated(mgr, port); - if (!port) - return slots; - - slots = port->vcpi.num_slots; - drm_dp_mst_topology_put_port(port); - return slots; -} -EXPORT_SYMBOL(drm_dp_mst_get_vcpi_slots); - -/** - * drm_dp_mst_reset_vcpi_slots() - Reset number of slots to 0 for VCPI - * @mgr: manager for this port - * @port: unverified pointer to a port. - * - * This just resets the number of slots for the ports VCPI for later programming. - */ -void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) -{ - /* - * A port with VCPI will remain allocated until its VCPI is - * released, no verified ref needed - */ - - port->vcpi.num_slots = 0; -} -EXPORT_SYMBOL(drm_dp_mst_reset_vcpi_slots); - -/** - * drm_dp_mst_deallocate_vcpi() - deallocate a VCPI - * @mgr: manager for this port - * @port: port to deallocate vcpi for - * - * This can be called unconditionally, regardless of whether - * drm_dp_mst_allocate_vcpi() succeeded or not. 
- */ -void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port) -{ - bool skip; - - if (!port->vcpi.vcpi) - return; - - mutex_lock(&mgr->lock); - skip = !drm_dp_mst_port_downstream_of_branch(port, mgr->mst_primary); - mutex_unlock(&mgr->lock); - - if (skip) - return; - - drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); - port->vcpi.num_slots = 0; - port->vcpi.pbn = 0; - port->vcpi.aligned_pbn = 0; - port->vcpi.vcpi = 0; - drm_dp_mst_put_port_malloc(port); -} -EXPORT_SYMBOL(drm_dp_mst_deallocate_vcpi); - static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, - int id, struct drm_dp_payload *payload) + int id, u8 start_slot, u8 num_slots) { u8 payload_alloc[3], status; int ret; @@ -4801,8 +4516,8 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, DP_PAYLOAD_TABLE_UPDATED); payload_alloc[0] = id; - payload_alloc[1] = payload->start_slot; - payload_alloc[2] = payload->num_slots; + payload_alloc[1] = start_slot; + payload_alloc[2] = num_slots; ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); if (ret != 3) { @@ -5017,8 +4732,9 @@ static void fetch_monitor_name(struct drm_dp_mst_topology_mgr *mgr, void drm_dp_mst_dump_topology(struct seq_file *m, struct drm_dp_mst_topology_mgr *mgr) { - int i; - struct drm_dp_mst_port *port; + struct drm_dp_mst_topology_state *state; + struct drm_dp_mst_atomic_payload *payload; + int i, ret; mutex_lock(&mgr->lock); if (mgr->mst_primary) @@ -5027,36 +4743,35 @@ void drm_dp_mst_dump_topology(struct seq_file *m, /* dump VCPIs */ mutex_unlock(&mgr->lock); - mutex_lock(&mgr->payload_lock); - seq_printf(m, "\n*** VCPI Info ***\n"); - seq_printf(m, "payload_mask: %lx, vcpi_mask: %lx, max_payloads: %d\n", mgr->payload_mask, mgr->vcpi_mask, mgr->max_payloads); + ret = drm_modeset_lock_single_interruptible(&mgr->base.lock); + if (ret < 0) + return; - seq_printf(m, "\n| idx | port # | vcp_id | # slots | sink name |\n"); + state = to_drm_dp_mst_topology_state(mgr->base.state); + seq_printf(m, "\n*** Atomic state info ***\n"); + seq_printf(m, "payload_mask: %x, max_payloads: %d, start_slot: %u, pbn_div: %d\n", + state->payload_mask, mgr->max_payloads, state->start_slot, state->pbn_div); + + seq_printf(m, "\n| idx | port | vcpi | slots | pbn | dsc | sink name |\n"); for (i = 0; i < mgr->max_payloads; i++) { - if (mgr->proposed_vcpis[i]) { + list_for_each_entry(payload, &state->payloads, next) { char name[14]; - port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); - fetch_monitor_name(mgr, port, name, sizeof(name)); - seq_printf(m, "%10d%10d%10d%10d%20s\n", + if (payload->vcpi != i || payload->delete) + continue; + + fetch_monitor_name(mgr, payload->port, name, sizeof(name)); + seq_printf(m, " %5d %6d %6d %02d - %02d %5d %5s %19s\n", i, - port->port_num, - port->vcpi.vcpi, - port->vcpi.num_slots, + payload->port->port_num, + payload->vcpi, + payload->vc_start_slot, + payload->vc_start_slot + payload->time_slots - 1, + payload->pbn, + payload->dsc_enabled ? "Y" : "N", (*name != 0) ? 
name : "Unknown"); - } else - seq_printf(m, "%6d - Unused\n", i); - } - seq_printf(m, "\n*** Payload Info ***\n"); - seq_printf(m, "| idx | state | start slot | # slots |\n"); - for (i = 0; i < mgr->max_payloads; i++) { - seq_printf(m, "%10d%10d%15d%10d\n", - i, - mgr->payloads[i].payload_state, - mgr->payloads[i].start_slot, - mgr->payloads[i].num_slots); + } } - mutex_unlock(&mgr->payload_lock); seq_printf(m, "\n*** DPCD Info ***\n"); mutex_lock(&mgr->lock); @@ -5102,7 +4817,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m, out: mutex_unlock(&mgr->lock); - + drm_modeset_unlock(&mgr->base.lock); } EXPORT_SYMBOL(drm_dp_mst_dump_topology); @@ -5423,9 +5138,22 @@ drm_dp_mst_atomic_check_payload_alloc_limits(struct drm_dp_mst_topology_mgr *mgr mgr, mst_state, mgr->max_payloads); return -EINVAL; } + + /* Assign a VCPI */ + if (!payload->vcpi) { + payload->vcpi = ffz(mst_state->payload_mask) + 1; + drm_dbg_atomic(mgr->dev, "[MST PORT:%p] assigned VCPI #%d\n", + payload->port, payload->vcpi); + mst_state->payload_mask |= BIT(payload->vcpi - 1); + } } - drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU avail=%d used=%d\n", - mgr, mst_state, avail_slots, mst_state->total_avail_slots - avail_slots); + + if (!payload_count) + mst_state->pbn_div = 0; + + drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p TU pbn_div=%d avail=%d used=%d\n", + mgr, mst_state, mst_state->pbn_div, avail_slots, + mst_state->total_avail_slots - avail_slots); return 0; } @@ -5496,7 +5224,6 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs); * @state: Pointer to the new drm_atomic_state * @port: Pointer to the affected MST Port * @pbn: Newly recalculated bw required for link with DSC enabled - * @pbn_div: Divider to calculate correct number of pbn per slot * @enable: Boolean flag to enable or disable DSC on the port * * This function enables DSC on the given Port @@ -5507,8 +5234,7 @@ EXPORT_SYMBOL(drm_dp_mst_add_affected_dsc_crtcs); */ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, struct drm_dp_mst_port *port, - int pbn, int pbn_div, - bool enable) + int pbn, bool enable) { struct drm_dp_mst_topology_state *mst_state; struct drm_dp_mst_atomic_payload *payload; @@ -5534,7 +5260,7 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, } if (enable) { - time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn, pbn_div); + time_slots = drm_dp_atomic_find_time_slots(state, port->mgr, port, pbn); drm_dbg_atomic(state->dev, "[MST PORT:%p] Enabling DSC flag, reallocating %d time slots on the port\n", port, time_slots); @@ -5547,6 +5273,7 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, return time_slots; } EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc); + /** * drm_dp_mst_atomic_check - Check that the new state of an MST topology in an * atomic update is valid @@ -5604,7 +5331,6 @@ EXPORT_SYMBOL(drm_dp_mst_topology_state_funcs); /** * drm_atomic_get_mst_topology_state: get MST topology state - * * @state: global atomic state * @mgr: MST topology manager, also the private object in this case * @@ -5623,6 +5349,31 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a } EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); +/** + * drm_atomic_get_new_mst_topology_state: get new MST topology state in atomic state, if any + * @state: global atomic state + * @mgr: MST topology manager, also the private object in this case + * + * This function wraps drm_atomic_get_priv_obj_state() passing in the MST atomic + * state vtable so that the 
private object state returned is that of a MST + * topology object. + * + * Returns: + * + * The MST topology state, or NULL if there's no topology state for this MST mgr + * in the global atomic state + */ +struct drm_dp_mst_topology_state * +drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr) +{ + struct drm_private_state *priv_state = + drm_atomic_get_new_private_obj_state(state, &mgr->base); + + return priv_state ? to_dp_mst_topology_state(priv_state) : NULL; +} +EXPORT_SYMBOL(drm_atomic_get_new_mst_topology_state); + /** * drm_dp_mst_topology_mgr_init - initialise a topology manager * @mgr: manager struct to initialise @@ -5630,8 +5381,6 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); * @aux: DP helper aux channel to talk to this device * @max_dpcd_transaction_bytes: hw specific DPCD transaction limit * @max_payloads: maximum number of payloads this GPU can source - * @max_lane_count: maximum number of lanes this GPU supports - * @max_link_rate: maximum link rate per lane this GPU supports in kHz * @conn_base_id: the connector object ID the MST device is connected to. * * Return 0 for success, or negative error code on failure @@ -5639,14 +5388,12 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state); int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct drm_device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, int max_payloads, - int max_lane_count, int max_link_rate, int conn_base_id) { struct drm_dp_mst_topology_state *mst_state; mutex_init(&mgr->lock); mutex_init(&mgr->qlock); - mutex_init(&mgr->payload_lock); mutex_init(&mgr->delayed_destroy_lock); mutex_init(&mgr->up_req_lock); mutex_init(&mgr->probe_lock); @@ -5676,19 +5423,7 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, mgr->aux = aux; mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; mgr->max_payloads = max_payloads; - mgr->max_lane_count = max_lane_count; - mgr->max_link_rate = max_link_rate; mgr->conn_base_id = conn_base_id; - if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 || - max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8) - return -EINVAL; - mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); - if (!mgr->payloads) - return -ENOMEM; - mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); - if (!mgr->proposed_vcpis) - return -ENOMEM; - set_bit(0, &mgr->payload_mask); mst_state = kzalloc(sizeof(*mst_state), GFP_KERNEL); if (mst_state == NULL) @@ -5721,19 +5456,12 @@ void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) destroy_workqueue(mgr->delayed_destroy_wq); mgr->delayed_destroy_wq = NULL; } - mutex_lock(&mgr->payload_lock); - kfree(mgr->payloads); - mgr->payloads = NULL; - kfree(mgr->proposed_vcpis); - mgr->proposed_vcpis = NULL; - mutex_unlock(&mgr->payload_lock); mgr->dev = NULL; mgr->aux = NULL; drm_atomic_private_obj_fini(&mgr->base); mgr->funcs = NULL; mutex_destroy(&mgr->delayed_destroy_lock); - mutex_destroy(&mgr->payload_lock); mutex_destroy(&mgr->qlock); mutex_destroy(&mgr->lock); mutex_destroy(&mgr->up_req_lock); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 1b067cd73261..13abe2b2170e 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -52,6 +52,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, struct drm_atomic_state *state = 
crtc_state->uapi.state; struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; + struct drm_dp_mst_topology_state *mst_state; struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *i915 = to_i915(connector->base.dev); @@ -60,22 +61,28 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, bool constant_n = drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_CONSTANT_N); int bpp, slots = -EINVAL; + mst_state = drm_atomic_get_mst_topology_state(state, &intel_dp->mst_mgr); + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + crtc_state->lane_count = limits->max_lane_count; crtc_state->port_clock = limits->max_rate; + // TODO: Handle pbn_div changes by adding a new MST helper + if (!mst_state->pbn_div) { + mst_state->pbn_div = drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr, + limits->max_rate, + limits->max_lane_count); + } + for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) { crtc_state->pipe_bpp = bpp; crtc_state->pbn = drm_dp_calc_pbn_mode(adjusted_mode->crtc_clock, crtc_state->pipe_bpp, false); - slots = drm_dp_atomic_find_time_slots(state, &intel_dp->mst_mgr, - connector->port, - crtc_state->pbn, - drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr, - crtc_state->port_clock, - crtc_state->lane_count)); + connector->port, crtc_state->pbn); if (slots == -EDEADLK) return slots; if (slots >= 0) @@ -360,21 +367,17 @@ static void intel_mst_disable_dp(struct intel_atomic_state *state, struct intel_dp *intel_dp = &dig_port->dp; struct intel_connector *connector = to_intel_connector(old_conn_state->connector); + struct drm_dp_mst_topology_state *mst_state = + drm_atomic_get_mst_topology_state(&state->base, &intel_dp->mst_mgr); struct drm_i915_private *i915 = to_i915(connector->base.dev); - int start_slot = intel_dp_is_uhbr(old_crtc_state) ? 0 : 1; - int ret; drm_dbg_kms(&i915->drm, "active links %d\n", intel_dp->active_mst_links); intel_hdcp_disable(intel_mst->connector); - drm_dp_mst_reset_vcpi_slots(&intel_dp->mst_mgr, connector->port); - - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot); - if (ret) { - drm_dbg_kms(&i915->drm, "failed to update payload %d\n", ret); - } + drm_dp_remove_payload(&intel_dp->mst_mgr, mst_state, + drm_atomic_get_mst_payload_state(mst_state, connector->port)); intel_audio_codec_disable(encoder, old_crtc_state, old_conn_state); } @@ -402,8 +405,6 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, intel_disable_transcoder(old_crtc_state); - drm_dp_update_payload_part2(&intel_dp->mst_mgr); - clear_act_sent(encoder, old_crtc_state); intel_de_rmw(dev_priv, TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), @@ -411,8 +412,6 @@ static void intel_mst_post_disable_dp(struct intel_atomic_state *state, wait_for_act_sent(encoder, old_crtc_state); - drm_dp_mst_deallocate_vcpi(&intel_dp->mst_mgr, connector->port); - intel_ddi_disable_transcoder_func(old_crtc_state); if (DISPLAY_VER(dev_priv) >= 9) @@ -479,7 +478,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_connector *connector = to_intel_connector(conn_state->connector); - int start_slot = intel_dp_is_uhbr(pipe_config) ? 
0 : 1; + struct drm_dp_mst_topology_state *mst_state = + drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); int ret; bool first_mst_stream; @@ -505,16 +505,13 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, dig_port->base.pre_enable(state, &dig_port->base, pipe_config, NULL); - ret = drm_dp_mst_allocate_vcpi(&intel_dp->mst_mgr, - connector->port, - pipe_config->pbn, - pipe_config->dp_m_n.tu); - if (!ret) - drm_err(&dev_priv->drm, "failed to allocate vcpi\n"); - intel_dp->active_mst_links++; - ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr, start_slot); + ret = drm_dp_add_payload_part1(&intel_dp->mst_mgr, mst_state, + drm_atomic_get_mst_payload_state(mst_state, connector->port)); + if (ret < 0) + drm_err(&dev_priv->drm, "Failed to create MST payload for %s: %d\n", + connector->base.name, ret); /* * Before Gen 12 this is not done as part of @@ -537,7 +534,10 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; + struct intel_connector *connector = to_intel_connector(conn_state->connector); struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct drm_dp_mst_topology_state *mst_state = + drm_atomic_get_new_mst_topology_state(&state->base, &intel_dp->mst_mgr); enum transcoder trans = pipe_config->cpu_transcoder; drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder); @@ -565,7 +565,8 @@ static void intel_mst_enable_dp(struct intel_atomic_state *state, wait_for_act_sent(encoder, pipe_config); - drm_dp_update_payload_part2(&intel_dp->mst_mgr); + drm_dp_add_payload_part2(&intel_dp->mst_mgr, &state->base, + drm_atomic_get_mst_payload_state(mst_state, connector->port)); if (DISPLAY_VER(dev_priv) >= 12 && pipe_config->fec_enable) intel_de_rmw(dev_priv, CHICKEN_TRANS(trans), 0, @@ -949,8 +950,6 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id) struct intel_dp *intel_dp = &dig_port->dp; enum port port = dig_port->base.port; int ret; - int max_source_rate = - intel_dp->source_rates[intel_dp->num_source_rates - 1]; if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp)) return 0; @@ -966,10 +965,7 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id) /* create encoders */ intel_dp_create_fake_mst_encoders(dig_port); ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm, - &intel_dp->aux, 16, 3, - dig_port->max_lanes, - max_source_rate, - conn_base_id); + &intel_dp->aux, 16, 3, conn_base_id); if (ret) { intel_dp->mst_mgr.cbs = NULL; return ret; diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 8ea66a2e1b09..7dbc9f0bb24f 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -30,8 +30,30 @@ static int intel_conn_to_vcpi(struct intel_connector *connector) { + struct drm_dp_mst_topology_mgr *mgr; + struct drm_dp_mst_atomic_payload *payload; + struct drm_dp_mst_topology_state *mst_state; + int vcpi = 0; + /* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */ - return connector->port ? 
connector->port->vcpi.vcpi : 0; + if (!connector->port) + return 0; + mgr = connector->port->mgr; + + drm_modeset_lock(&mgr->base.lock, NULL); + mst_state = to_drm_dp_mst_topology_state(mgr->base.state); + payload = drm_atomic_get_mst_payload_state(mst_state, connector->port); + if (drm_WARN_ON(mgr->dev, !payload)) + goto out; + + vcpi = payload->vcpi; + if (drm_WARN_ON(mgr->dev, vcpi < 0)) { + vcpi = 0; + goto out; + } +out: + drm_modeset_unlock(&mgr->base.lock); + return vcpi; } /* diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index 7e9a0b50bb42..33c97d510999 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -932,6 +932,7 @@ struct nv50_msto { struct nv50_head *head; struct nv50_mstc *mstc; bool disabled; + bool enabled; }; struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder) @@ -947,57 +948,37 @@ struct nouveau_encoder *nv50_real_outp(struct drm_encoder *encoder) return msto->mstc->mstm->outp; } -static struct drm_dp_payload * -nv50_msto_payload(struct nv50_msto *msto) -{ - struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev); - struct nv50_mstc *mstc = msto->mstc; - struct nv50_mstm *mstm = mstc->mstm; - int vcpi = mstc->port->vcpi.vcpi, i; - - WARN_ON(!mutex_is_locked(&mstm->mgr.payload_lock)); - - NV_ATOMIC(drm, "%s: vcpi %d\n", msto->encoder.name, vcpi); - for (i = 0; i < mstm->mgr.max_payloads; i++) { - struct drm_dp_payload *payload = &mstm->mgr.payloads[i]; - NV_ATOMIC(drm, "%s: %d: vcpi %d start 0x%02x slots 0x%02x\n", - mstm->outp->base.base.name, i, payload->vcpi, - payload->start_slot, payload->num_slots); - } - - for (i = 0; i < mstm->mgr.max_payloads; i++) { - struct drm_dp_payload *payload = &mstm->mgr.payloads[i]; - if (payload->vcpi == vcpi) - return payload; - } - - return NULL; -} - static void -nv50_msto_cleanup(struct nv50_msto *msto) +nv50_msto_cleanup(struct drm_atomic_state *state, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_topology_mgr *mgr, + struct nv50_msto *msto) { struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev); - struct nv50_mstc *mstc = msto->mstc; - struct nv50_mstm *mstm = mstc->mstm; - - if (!msto->disabled) - return; + struct drm_dp_mst_atomic_payload *payload = + drm_atomic_get_mst_payload_state(mst_state, msto->mstc->port); NV_ATOMIC(drm, "%s: msto cleanup\n", msto->encoder.name); - drm_dp_mst_deallocate_vcpi(&mstm->mgr, mstc->port); - - msto->mstc = NULL; - msto->disabled = false; + if (msto->disabled) { + msto->mstc = NULL; + msto->disabled = false; + } else if (msto->enabled) { + drm_dp_add_payload_part2(mgr, state, payload); + msto->enabled = false; + } } static void -nv50_msto_prepare(struct nv50_msto *msto) +nv50_msto_prepare(struct drm_atomic_state *state, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_topology_mgr *mgr, + struct nv50_msto *msto) { struct nouveau_drm *drm = nouveau_drm(msto->encoder.dev); struct nv50_mstc *mstc = msto->mstc; struct nv50_mstm *mstm = mstc->mstm; + struct drm_dp_mst_atomic_payload *payload; struct { struct nv50_disp_mthd_v1 base; struct nv50_disp_sor_dp_mst_vcpi_v0 vcpi; @@ -1009,17 +990,21 @@ nv50_msto_prepare(struct nv50_msto *msto) (0x0100 << msto->head->base.index), }; - mutex_lock(&mstm->mgr.payload_lock); - NV_ATOMIC(drm, "%s: msto prepare\n", msto->encoder.name); - if (mstc->port->vcpi.vcpi > 0) { - struct drm_dp_payload *payload = nv50_msto_payload(msto); - if (payload) { - args.vcpi.start_slot = payload->start_slot; - 
args.vcpi.num_slots = payload->num_slots; - args.vcpi.pbn = mstc->port->vcpi.pbn; - args.vcpi.aligned_pbn = mstc->port->vcpi.aligned_pbn; - } + + payload = drm_atomic_get_mst_payload_state(mst_state, mstc->port); + + // TODO: Figure out if we want to do a better job of handling VCPI allocation failures here? + if (msto->disabled) { + drm_dp_remove_payload(mgr, mst_state, payload); + } else { + if (msto->enabled) + drm_dp_add_payload_part1(mgr, mst_state, payload); + + args.vcpi.start_slot = payload->vc_start_slot; + args.vcpi.num_slots = payload->time_slots; + args.vcpi.pbn = payload->pbn; + args.vcpi.aligned_pbn = payload->time_slots * mst_state->pbn_div; } NV_ATOMIC(drm, "%s: %s: %02x %02x %04x %04x\n", @@ -1028,7 +1013,6 @@ nv50_msto_prepare(struct nv50_msto *msto) args.vcpi.pbn, args.vcpi.aligned_pbn); nvif_mthd(&drm->display->disp.object, 0, &args, sizeof(args)); - mutex_unlock(&mstm->mgr.payload_lock); } static int @@ -1038,6 +1022,7 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, { struct drm_atomic_state *state = crtc_state->state; struct drm_connector *connector = conn_state->connector; + struct drm_dp_mst_topology_state *mst_state; struct nv50_mstc *mstc = nv50_mstc(connector); struct nv50_mstm *mstm = mstc->mstm; struct nv50_head_atom *asyh = nv50_head_atom(crtc_state); @@ -1065,8 +1050,18 @@ nv50_msto_atomic_check(struct drm_encoder *encoder, false); } - slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, - asyh->dp.pbn, 0); + mst_state = drm_atomic_get_mst_topology_state(state, &mstm->mgr); + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + + if (!mst_state->pbn_div) { + struct nouveau_encoder *outp = mstc->mstm->outp; + + mst_state->pbn_div = drm_dp_get_vc_payload_bw(&mstm->mgr, + outp->dp.link_bw, outp->dp.link_nr); + } + + slots = drm_dp_atomic_find_time_slots(state, &mstm->mgr, mstc->port, asyh->dp.pbn); if (slots < 0) return slots; @@ -1098,7 +1093,6 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st struct drm_connector *connector; struct drm_connector_list_iter conn_iter; u8 proto; - bool r; drm_connector_list_iter_begin(encoder->dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { @@ -1113,10 +1107,6 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st if (WARN_ON(!mstc)) return; - r = drm_dp_mst_allocate_vcpi(&mstm->mgr, mstc->port, asyh->dp.pbn, asyh->dp.tu); - if (!r) - DRM_DEBUG_KMS("Failed to allocate VCPI\n"); - if (!mstm->links++) nv50_outp_acquire(mstm->outp, false /*XXX: MST audio.*/); @@ -1129,6 +1119,7 @@ nv50_msto_atomic_enable(struct drm_encoder *encoder, struct drm_atomic_state *st nv50_dp_bpc_to_depth(asyh->or.bpc)); msto->mstc = mstc; + msto->enabled = true; mstm->modified = true; } @@ -1139,8 +1130,6 @@ nv50_msto_atomic_disable(struct drm_encoder *encoder, struct drm_atomic_state *s struct nv50_mstc *mstc = msto->mstc; struct nv50_mstm *mstm = mstc->mstm; - drm_dp_mst_reset_vcpi_slots(&mstm->mgr, mstc->port); - mstm->outp->update(mstm->outp, msto->head->base.index, NULL, 0, 0); mstm->modified = true; if (!--mstm->links) @@ -1360,7 +1349,9 @@ nv50_mstc_new(struct nv50_mstm *mstm, struct drm_dp_mst_port *port, } static void -nv50_mstm_cleanup(struct nv50_mstm *mstm) +nv50_mstm_cleanup(struct drm_atomic_state *state, + struct drm_dp_mst_topology_state *mst_state, + struct nv50_mstm *mstm) { struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev); struct drm_encoder *encoder; @@ -1368,14 +1359,12 @@ nv50_mstm_cleanup(struct nv50_mstm 
*mstm) NV_ATOMIC(drm, "%s: mstm cleanup\n", mstm->outp->base.base.name); drm_dp_check_act_status(&mstm->mgr); - drm_dp_update_payload_part2(&mstm->mgr); - drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) { struct nv50_msto *msto = nv50_msto(encoder); struct nv50_mstc *mstc = msto->mstc; if (mstc && mstc->mstm == mstm) - nv50_msto_cleanup(msto); + nv50_msto_cleanup(state, mst_state, &mstm->mgr, msto); } } @@ -1383,20 +1372,34 @@ nv50_mstm_cleanup(struct nv50_mstm *mstm) } static void -nv50_mstm_prepare(struct nv50_mstm *mstm) +nv50_mstm_prepare(struct drm_atomic_state *state, + struct drm_dp_mst_topology_state *mst_state, + struct nv50_mstm *mstm) { struct nouveau_drm *drm = nouveau_drm(mstm->outp->base.base.dev); struct drm_encoder *encoder; NV_ATOMIC(drm, "%s: mstm prepare\n", mstm->outp->base.base.name); - drm_dp_update_payload_part1(&mstm->mgr, 1); + /* Disable payloads first */ drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) { struct nv50_msto *msto = nv50_msto(encoder); struct nv50_mstc *mstc = msto->mstc; - if (mstc && mstc->mstm == mstm) - nv50_msto_prepare(msto); + if (mstc && mstc->mstm == mstm && msto->disabled) + nv50_msto_prepare(state, mst_state, &mstm->mgr, msto); + } + } + + /* Add payloads for new heads, while also updating the start slots of any unmodified (but + * active) heads that may have had their VC slots shifted left after the previous step + */ + drm_for_each_encoder(encoder, mstm->outp->base.base.dev) { + if (encoder->encoder_type == DRM_MODE_ENCODER_DPMST) { + struct nv50_msto *msto = nv50_msto(encoder); + struct nv50_mstc *mstc = msto->mstc; + if (mstc && mstc->mstm == mstm && !msto->disabled) + nv50_msto_prepare(state, mst_state, &mstm->mgr, msto); } } @@ -1593,9 +1596,7 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max, mstm->mgr.cbs = &nv50_mstm; ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max, - max_payloads, outp->dcb->dpconf.link_nr, - drm_dp_bw_code_to_link_rate(outp->dcb->dpconf.link_bw), - conn_base_id); + max_payloads, conn_base_id); if (ret) return ret; @@ -2047,20 +2048,20 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe) static void nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock) { + struct drm_dp_mst_topology_mgr *mgr; + struct drm_dp_mst_topology_state *mst_state; struct nouveau_drm *drm = nouveau_drm(state->dev); struct nv50_disp *disp = nv50_disp(drm->dev); struct nv50_core *core = disp->core; struct nv50_mstm *mstm; - struct drm_encoder *encoder; + int i; NV_ATOMIC(drm, "commit core %08x\n", interlock[NV50_DISP_INTERLOCK_BASE]); - drm_for_each_encoder(encoder, drm->dev) { - if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) { - mstm = nouveau_encoder(encoder)->dp.mstm; - if (mstm && mstm->modified) - nv50_mstm_prepare(mstm); - } + for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { + mstm = nv50_mstm(mgr); + if (mstm->modified) + nv50_mstm_prepare(state, mst_state, mstm); } core->func->ntfy_init(disp->sync, NV50_DISP_CORE_NTFY); @@ -2069,12 +2070,10 @@ nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock) disp->core->chan.base.device)) NV_ERROR(drm, "core notifier timeout\n"); - drm_for_each_encoder(encoder, drm->dev) { - if (encoder->encoder_type != DRM_MODE_ENCODER_DPMST) { - mstm = nouveau_encoder(encoder)->dp.mstm; - if (mstm && mstm->modified) - nv50_mstm_cleanup(mstm); - } + 
for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { + mstm = nv50_mstm(mgr); + if (mstm->modified) + nv50_mstm_cleanup(state, mst_state, mstm); } } diff --git a/include/drm/display/drm_dp_mst_helper.h b/include/drm/display/drm_dp_mst_helper.h index 8b847836a0b4..43f58cef4eec 100644 --- a/include/drm/display/drm_dp_mst_helper.h +++ b/include/drm/display/drm_dp_mst_helper.h @@ -48,20 +48,6 @@ struct drm_dp_mst_topology_ref_history { struct drm_dp_mst_branch; -/** - * struct drm_dp_vcpi - Virtual Channel Payload Identifier - * @vcpi: Virtual channel ID. - * @pbn: Payload Bandwidth Number for this channel - * @aligned_pbn: PBN aligned with slot size - * @num_slots: number of slots for this PBN - */ -struct drm_dp_vcpi { - int vcpi; - int pbn; - int aligned_pbn; - int num_slots; -}; - /** * struct drm_dp_mst_port - MST port * @port_num: port number @@ -142,7 +128,6 @@ struct drm_dp_mst_port { struct drm_dp_aux aux; /* i2c bus for this port? */ struct drm_dp_mst_branch *parent; - struct drm_dp_vcpi vcpi; struct drm_connector *connector; struct drm_dp_mst_topology_mgr *mgr; @@ -527,19 +512,6 @@ struct drm_dp_mst_topology_cbs { void (*poll_hpd_irq)(struct drm_dp_mst_topology_mgr *mgr); }; -#define DP_MAX_PAYLOAD (sizeof(unsigned long) * 8) - -#define DP_PAYLOAD_LOCAL 1 -#define DP_PAYLOAD_REMOTE 2 -#define DP_PAYLOAD_DELETE_LOCAL 3 - -struct drm_dp_payload { - int payload_state; - int start_slot; - int num_slots; - int vcpi; -}; - #define to_dp_mst_topology_state(x) container_of(x, struct drm_dp_mst_topology_state, base) /** @@ -552,6 +524,34 @@ struct drm_dp_mst_atomic_payload { /** @port: The MST port assigned to this payload */ struct drm_dp_mst_port *port; + /** + * @vc_start_slot: The time slot that this payload starts on. Because payload start slots + * can't be determined ahead of time, the contents of this value are UNDEFINED at atomic + * check time. This shouldn't usually matter, as the start slot should never be relevant for + * atomic state computations. + * + * Since this value is determined at commit time instead of check time, this value is + * protected by the MST helpers ensuring that async commits operating on the given topology + * never run in parallel. In the event that a driver does need to read this value (e.g. to + * inform hardware of the starting timeslot for a payload), the driver may either: + * + * * Read this field during the atomic commit after + * drm_dp_mst_atomic_wait_for_dependencies() has been called, which will ensure the + * previous MST states payload start slots have been copied over to the new state. Note + * that a new start slot won't be assigned/removed from this payload until + * drm_dp_add_payload_part1()/drm_dp_remove_payload() have been called. + * * Acquire the MST modesetting lock, and then wait for any pending MST-related commits to + * get committed to hardware by calling drm_crtc_commit_wait() on each of the + * &drm_crtc_commit structs in &drm_dp_mst_topology_state.commit_deps. + * + * If neither of the two above solutions suffice (e.g. the driver needs to read the start + * slot in the middle of an atomic commit without waiting for some reason), then drivers + * should cache this value themselves after changing payloads. 
+ */ + s8 vc_start_slot; + + /** @vcpi: The Virtual Channel Payload Identifier */ + u8 vcpi; /** * @time_slots: * The number of timeslots allocated to this payload from the source DP Tx to @@ -579,8 +579,6 @@ struct drm_dp_mst_topology_state { /** @base: Base private state for atomic */ struct drm_private_state base; - /** @payloads: The list of payloads being created/destroyed in this state */ - struct list_head payloads; /** @mgr: The topology manager */ struct drm_dp_mst_topology_mgr *mgr; @@ -597,10 +595,21 @@ struct drm_dp_mst_topology_state { /** @num_commit_deps: The number of CRTC commits in @commit_deps */ size_t num_commit_deps; + /** @payload_mask: A bitmask of allocated VCPIs, used for VCPI assignments */ + u32 payload_mask; + /** @payloads: The list of payloads being created/destroyed in this state */ + struct list_head payloads; + /** @total_avail_slots: The total number of slots this topology can handle (63 or 64) */ u8 total_avail_slots; /** @start_slot: The first usable time slot in this topology (1 or 0) */ u8 start_slot; + + /** + * @pbn_div: The current PBN divisor for this topology. The driver is expected to fill this + * out itself. + */ + int pbn_div; }; #define to_dp_mst_topology_mgr(x) container_of(x, struct drm_dp_mst_topology_mgr, base) @@ -640,14 +649,6 @@ struct drm_dp_mst_topology_mgr { * @max_payloads: maximum number of payloads the GPU can generate. */ int max_payloads; - /** - * @max_lane_count: maximum number of lanes the GPU can drive. - */ - int max_lane_count; - /** - * @max_link_rate: maximum link rate per lane GPU can output, in kHz. - */ - int max_link_rate; /** * @conn_base_id: DRM connector ID this mgr is connected to. Only used * to build the MST connector path value. @@ -690,6 +691,20 @@ struct drm_dp_mst_topology_mgr { */ bool payload_id_table_cleared : 1; + /** + * @payload_count: The number of currently active payloads in hardware. This value is only + * intended to be used internally by MST helpers for payload tracking, and is only safe to + * read/write from the atomic commit (not check) context. + */ + u8 payload_count; + + /** + * @next_start_slot: The starting timeslot to use for new VC payloads. This value is used + * internally by MST helpers for payload tracking, and is only safe to read/write from the + * atomic commit (not check) context. + */ + u8 next_start_slot; + /** * @mst_primary: Pointer to the primary/first branch device. */ @@ -703,10 +718,6 @@ struct drm_dp_mst_topology_mgr { * @sink_count: Sink count from DEVICE_SERVICE_IRQ_VECTOR_ESI0. */ u8 sink_count; - /** - * @pbn_div: PBN to slots divisor. - */ - int pbn_div; /** * @funcs: Atomic helper callbacks @@ -723,32 +734,6 @@ struct drm_dp_mst_topology_mgr { */ struct list_head tx_msg_downq; - /** - * @payload_lock: Protect payload information. - */ - struct mutex payload_lock; - /** - * @proposed_vcpis: Array of pointers for the new VCPI allocation. The - * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of - * this array is determined by @max_payloads. - */ - struct drm_dp_vcpi **proposed_vcpis; - /** - * @payloads: Array of payloads. The size of this array is determined - * by @max_payloads. - */ - struct drm_dp_payload *payloads; - /** - * @payload_mask: Elements of @payloads actually in use. Since - * reallocation of active outputs isn't possible gaps can be created by - * disabling outputs out of order compared to how they've been enabled. - */ - unsigned long payload_mask; - /** - * @vcpi_mask: Similar to @payload_mask, but for @proposed_vcpis. 
- */ - unsigned long vcpi_mask; - /** * @tx_waitq: Wait to queue stall for the tx worker. */ @@ -820,9 +805,7 @@ struct drm_dp_mst_topology_mgr { int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, struct drm_device *dev, struct drm_dp_aux *aux, int max_dpcd_transaction_bytes, - int max_payloads, - int max_lane_count, int max_link_rate, - int conn_base_id); + int max_payloads, int conn_base_id); void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr); @@ -845,28 +828,17 @@ int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr, int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc); -bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, int pbn, int slots); - -int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); - - -void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port); - void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_t link_encoding_cap); -void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port); - - -int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, - int pbn); - - -int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr, int start_slot); - - -int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr); +int drm_dp_add_payload_part1(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload); +int drm_dp_add_payload_part2(struct drm_dp_mst_topology_mgr *mgr, + struct drm_atomic_state *state, + struct drm_dp_mst_atomic_payload *payload); +void drm_dp_remove_payload(struct drm_dp_mst_topology_mgr *mgr, + struct drm_dp_mst_topology_state *mst_state, + struct drm_dp_mst_atomic_payload *payload); int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr); @@ -888,17 +860,22 @@ int drm_dp_mst_connector_late_register(struct drm_connector *connector, void drm_dp_mst_connector_early_unregister(struct drm_connector *connector, struct drm_dp_mst_port *port); -struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, - struct drm_dp_mst_topology_mgr *mgr); +struct drm_dp_mst_topology_state * +drm_atomic_get_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +struct drm_dp_mst_topology_state * +drm_atomic_get_new_mst_topology_state(struct drm_atomic_state *state, + struct drm_dp_mst_topology_mgr *mgr); +struct drm_dp_mst_atomic_payload * +drm_atomic_get_mst_payload_state(struct drm_dp_mst_topology_state *state, + struct drm_dp_mst_port *port); int __must_check drm_dp_atomic_find_time_slots(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr, - struct drm_dp_mst_port *port, int pbn, - int pbn_div); + struct drm_dp_mst_port *port, int pbn); int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state, struct drm_dp_mst_port *port, - int pbn, int pbn_div, - bool enable); + int pbn, bool enable); int __must_check drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm_dp_mst_topology_mgr *mgr); @@ -922,6 +899,12 @@ void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port); struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port); +static inline struct drm_dp_mst_topology_state * +to_drm_dp_mst_topology_state(struct drm_private_state *state) +{ + return container_of(state, struct 
drm_dp_mst_topology_state, base); +} + extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs; /** -- cgit v1.2.3-59-g8ed1b From da11ef832972e311475fcba802398e720ed36c54 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Fri, 15 Apr 2022 22:22:41 +0200 Subject: drm/amdgpu: Don't register backlight when another backlight should be used (v3) Before this commit when we want userspace to use the acpi_video backlight device we register both the GPU's native backlight device and acpi_video's firmware acpi_video# backlight device. This relies on userspace preferring firmware type backlight devices over native ones. Registering 2 backlight devices for a single display really is undesirable, don't register the GPU's native backlight device when another backlight device should be used. Changes in v2: - To avoid linker errors when amdgpu is builtin and video_detect.c is in a module, select ACPI_VIDEO and its deps if ACPI is enabled. When ACPI is disabled, ACPI_VIDEO is also always disabled, ensuring the stubs from acpi/video.h will be used. Changes in v3: - Use drm_info(drm_dev, "...") to log messages - ACPI_VIDEO can now be enabled on non X86 too, adjust the Kconfig changes to match this. Acked-by: Alex Deucher Signed-off-by: Hans de Goede --- drivers/gpu/drm/Kconfig | 7 +++++++ drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 7 +++++++ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 7 +++++++ 3 files changed, 21 insertions(+) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig index 6c2256e8474b..a9139c6c3a09 100644 --- a/drivers/gpu/drm/Kconfig +++ b/drivers/gpu/drm/Kconfig @@ -273,6 +273,13 @@ config DRM_AMDGPU select BACKLIGHT_CLASS_DEVICE select INTERVAL_TREE select DRM_BUDDY + # amdgpu depends on ACPI_VIDEO when ACPI is enabled, for select to work + # ACPI_VIDEO's dependencies must also be selected. + select INPUT if ACPI + select ACPI_VIDEO if ACPI + # On x86 ACPI_VIDEO also needs ACPI_WMI + select X86_PLATFORM_DEVICES if ACPI && X86 + select ACPI_WMI if ACPI && X86 help Choose this option if you have a recent AMD Radeon graphics card. 
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index fa7421afb9a6..b4e3cedceaf8 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -26,6 +26,8 @@ #include +#include + #include #include #include "amdgpu.h" @@ -184,6 +186,11 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) return; + if (!acpi_video_backlight_use_native()) { + drm_info(dev, "Skipping amdgpu atom DIG backlight registration\n"); + return; + } + pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL); if (!pdata) { DRM_ERROR("Memory allocation failed\n"); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8660d93cc405..a64baff041a7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -89,6 +89,8 @@ #include #include +#include + #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" #include "dcn/dcn_1_0_offset.h" @@ -4055,6 +4057,11 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) amdgpu_dm_update_backlight_caps(dm, dm->num_of_edps); dm->brightness[dm->num_of_edps] = AMDGPU_MAX_BL_LEVEL; + if (!acpi_video_backlight_use_native()) { + drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n"); + return; + } + props.max_brightness = AMDGPU_MAX_BL_LEVEL; props.brightness = AMDGPU_MAX_BL_LEVEL; props.type = BACKLIGHT_RAW; -- cgit v1.2.3-59-g8ed1b From 42900348bfb41cbfced62060c9cf4b735119394c Mon Sep 17 00:00:00 2001 From: Roman Li Date: Thu, 11 Aug 2022 10:51:19 -0400 Subject: drm/amd/display: Remove redundant check in atomic_check [Why] We have 2 back-to-back checks for skipping connectors. Logically one of them will do the job. [How] Remove redundant check. Reviewed-by: Hersen Wu > Acked-by: Brian Chang Signed-off-by: Roman Li Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 --- 1 file changed, 3 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 2412dc601ce2..b0a5924d528e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9380,9 +9380,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); /* Skip connectors that are disabled or part of modeset already. */ - if (!old_con_state->crtc && !new_con_state->crtc) - continue; - if (!new_con_state->crtc) continue; -- cgit v1.2.3-59-g8ed1b From c17a34e0526fafc0e1e7f707c634d7e49dd08197 Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Thu, 4 Aug 2022 15:44:27 +0800 Subject: drm/amd/display: Refactor edp dsc codes. Refactor edp dsc codes. We split out edp dsc config from "global" to "per-panel" config settings. 
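(Illustrative sketch, not part of the patch: after the split below, DSC policy is read from the link being driven instead of the global dc->debug options. The helper name is invented for illustration; only link->connector_signal, SIGNAL_TYPE_EDP and link->panel_config.dsc come from the patch.)

/* Hypothetical helper showing the per-panel lookup; not in the patch. */
static bool edp_dsc_allowed(const struct dc_link *link)
{
        if (link->connector_signal != SIGNAL_TYPE_EDP)
                return false;
        /* replaces the old global dc->debug.disable_dsc_edp flag */
        return !link->panel_config.dsc.disable_dsc_edp;
}
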
Reviewed-by: Mike Hsieh Acked-by: Brian Chang Signed-off-by: Ian Chen Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++-- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 19 +++++++++++++++++++ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 10 +++++++++- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ++-- drivers/gpu/drm/amd/display/dc/dc.h | 2 -- drivers/gpu/drm/amd/display/dc/dc_link.h | 10 ++++++++++ drivers/gpu/drm/amd/display/dc/dm_helpers.h | 7 ++++++- 7 files changed, 48 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b0a5924d528e..9d5ec1ea23a6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1529,7 +1529,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { adev->dm.dc->debug.disable_dsc = true; - adev->dm.dc->debug.disable_dsc_edp = true; } if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) @@ -5629,7 +5628,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, dc_dsc_policy_set_enable_dsc_when_not_needed( aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); - if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && !dc->debug.disable_dsc_edp && + if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && + !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index bd364d2cc4f7..0b7440b92c10 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -916,6 +916,25 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne //amdgpu_device_gpu_recover(dc_context->driver-context, NULL); } +void dm_helpers_init_panel_settings( + struct dc_context *ctx, + struct dc_panel_config *panel_config) +{ + // Feature DSC + panel_config->dsc.disable_dsc_edp = false; + panel_config->dsc.force_dsc_edp_policy = 0; +} + +void dm_helpers_override_panel_settings( + struct dc_context *ctx, + struct dc_panel_config *panel_config) +{ + // Feature DSC + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) { + panel_config->dsc.disable_dsc_edp = true; + } +} + void *dm_helpers_allocate_gpu_mem( struct dc_context *ctx, enum dc_gpu_mem_alloc_type type, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 66d2ae7aacf5..74db87f3dc6f 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1311,6 +1311,14 @@ static bool detect_link_and_local_sink(struct dc_link *link, sink->edid_caps.audio_modes[i].sample_rate, sink->edid_caps.audio_modes[i].sample_size); } + + if (link->connector_signal == SIGNAL_TYPE_EDP) { + // Init dc_panel_config + dm_helpers_init_panel_settings(dc_ctx, &link->panel_config); + // Override dc_panel_config if system has specific settings + dm_helpers_override_panel_settings(dc_ctx, &link->panel_config); + } + } else { /* From 
Connected-to-Disconnected. */ link->type = dc_connection_none; @@ -4736,7 +4744,7 @@ bool dc_link_should_enable_fec(const struct dc_link *link) else if (link->connector_signal == SIGNAL_TYPE_EDP && (link->dpcd_caps.dsc_caps.dsc_basic_caps.fields. dsc_support.DSC_SUPPORT == false - || link->dc->debug.disable_dsc_edp + || link->panel_config.dsc.disable_dsc_edp || !link->dc->caps.edp_dsc_support)) force_disable = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index d2bdf3939cf9..e2413d2908c9 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -3743,7 +3743,7 @@ static bool decide_edp_link_settings_with_dsc(struct dc_link *link, unsigned int policy = 0; - policy = link->ctx->dc->debug.force_dsc_edp_policy; + policy = link->panel_config.dsc.force_dsc_edp_policy; if (max_link_rate == LINK_RATE_UNKNOWN) max_link_rate = link->verified_link_cap.link_rate; /* @@ -3909,7 +3909,7 @@ bool decide_link_settings(struct dc_stream_state *stream, if (stream->timing.flags.DSC) { enum dc_link_rate max_link_rate = LINK_RATE_UNKNOWN; - if (link->ctx->dc->debug.force_dsc_edp_policy) { + if (link->panel_config.dsc.force_dsc_edp_policy) { /* calculate link max link rate cap*/ struct dc_link_settings tmp_link_setting; struct dc_crtc_timing tmp_timing = stream->timing; diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 5e18bdb9785d..ec227e52fdd3 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -802,8 +802,6 @@ struct dc_debug_options { bool validate_dml_output; bool enable_dmcub_surface_flip; bool usbc_combo_phy_reset_wa; - bool disable_dsc_edp; - unsigned int force_dsc_edp_policy; bool enable_dram_clock_change_one_display_vactive; /* TODO - remove once tested */ bool legacy_dp2_lt; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index d1214944839f..43d250918fd0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -113,6 +113,15 @@ struct psr_settings { unsigned int psr_power_opt; }; +/* To split out "global" and "per-panel" config settings. + * Add a struct dc_panel_config under dc_link + */ +struct dc_panel_config { + struct dsc { + bool disable_dsc_edp; + unsigned int force_dsc_edp_policy; + } dsc; +}; /* * A link contains one or more sinks and their connected status. * The currently active signal type (HDMI, DP-SST, DP-MST) is also reported. 
@@ -233,6 +242,7 @@ struct dc_link { struct gpio *hpd_gpio; enum dc_link_fec_state fec_state; + struct dc_panel_config panel_config; enum phy_state phy_state; }; diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index fb6a2d7b6470..6e4d3df0454e 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -171,7 +171,12 @@ void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigne // 0x1 = Result_OK, 0xFE = Result_UnkmownCmd, 0x0 = Status_Busy #define IS_SMU_TIMEOUT(result) \ (result == 0x0) - +void dm_helpers_init_panel_settings( + struct dc_context *ctx, + struct dc_panel_config *config); +void dm_helpers_override_panel_settings( + struct dc_context *ctx, + struct dc_panel_config *config); int dm_helper_dmub_aux_transfer_sync( struct dc_context *ctx, const struct dc_link *link, -- cgit v1.2.3-59-g8ed1b From 41ee1f18ef52390518ed14b8be98640955d8f767 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 30 Aug 2022 17:57:52 -0400 Subject: drm/amd/display: fix documentation for amdgpu_dm_update_freesync_caps() Document missing parameter. Cc: Rodrigo Siqueira Cc: Harry Wentland Fixes: 8889a13f99e5 ("drm/amd/display: Add some extra kernel doc to amdgpu_dm") Reported-by: Stephen Rothwell Reviewed-by: Rodrigo Siqueira Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 9d5ec1ea23a6..b053c13a81a5 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9936,7 +9936,8 @@ static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, /** * amdgpu_dm_update_freesync_caps - Update Freesync capabilities * - * @aconnector: Connector to query. + * @connector: Connector to query. + * @edid: EDID from monitor * * Amdgpu supports Freesync in DP and HDMI displays, and it is required to keep * track of some of the display information in the internal data struct used by -- cgit v1.2.3-59-g8ed1b From c0f50c5de93b8afb2281009a33c124e82973e457 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 16 May 2022 11:32:33 +0200 Subject: drm/amdgpu: Register ACPI video backlight when skipping amdgpu backlight registration Typically the acpi_video driver will initialize before amdgpu, which used to cause /sys/class/backlight/acpi_video0 to get registered and then amdgpu would register its own amdgpu_bl# device later. After which the drivers/acpi/video_detect.c code unregistered the acpi_video0 device to avoid there being 2 backlight devices. This means that userspace used to briefly see 2 devices and the disappearing of acpi_video0 after a brief time confuses the systemd backlight level save/restore code, see e.g.: https://bbs.archlinux.org/viewtopic.php?id=269920 To fix this the ACPI video code has been modified to make backlight class device registration a separate step, relying on the drm/kms driver to ask for the acpi_video backlight registration after it is done setting up its native backlight device. Add a call to the new acpi_video_register_backlight() when amdgpu skips registering its own backlight device because of either the firmware_flags or the acpi_video_get_backlight_type() return value. 
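(Rough sketch of the decision both backlight paths below end up making; the helper name is made up for illustration, only acpi_video_backlight_use_native(), acpi_video_register_backlight() and drm_info() are taken from the patch.)

/* Hypothetical helper, not in the patch: choose native vs. ACPI video backlight. */
static void amdgpu_register_backlight_or_acpi(struct drm_device *dev)
{
        if (!acpi_video_backlight_use_native()) {
                drm_info(dev, "Skipping native backlight registration\n");
                /* let the firmware (acpi_video#) backlight device be used instead */
                acpi_video_register_backlight();
                return;
        }
        /* ... register the native amdgpu backlight device here ... */
}
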
This ensures that if the acpi_video backlight device should be used, it will be available before the amdgpu drm_device gets registered with userspace. Acked-by: Alex Deucher Signed-off-by: Hans de Goede --- drivers/gpu/drm/amd/amdgpu/atombios_encoders.c | 9 +++++++-- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 ++ 2 files changed, 9 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c index b4e3cedceaf8..6be9ac2b9c5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_encoders.c @@ -184,11 +184,11 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode return; if (!(adev->mode_info.firmware_flags & ATOM_BIOS_INFO_BL_CONTROLLED_BY_GPU)) - return; + goto register_acpi_backlight; if (!acpi_video_backlight_use_native()) { drm_info(dev, "Skipping amdgpu atom DIG backlight registration\n"); - return; + goto register_acpi_backlight; } pdata = kmalloc(sizeof(struct amdgpu_backlight_privdata), GFP_KERNEL); @@ -225,6 +225,11 @@ void amdgpu_atombios_encoder_init_backlight(struct amdgpu_encoder *amdgpu_encode error: kfree(pdata); return; + +register_acpi_backlight: + /* Try registering an ACPI video backlight device instead. */ + acpi_video_register_backlight(); + return; } void diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index a64baff041a7..3f456978dca7 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4059,6 +4059,8 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm) if (!acpi_video_backlight_use_native()) { drm_info(adev_to_drm(dm->adev), "Skipping amdgpu DM backlight registration\n"); + /* Try registering an ACPI video backlight device instead. */ + acpi_video_register_backlight(); return; } -- cgit v1.2.3-59-g8ed1b From 34955a1e797d074e72d0ac6a514d934d8fe80da1 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Fri, 26 Aug 2022 17:53:34 -0400 Subject: drm/amd/display: Don't adjust VRR unnecessarily [why] Do not need to spend time reprogramming DRR if there were no updates to the parameters. [how] Compare the current stream state to the requested one to determine if an update is required. In amdgpu_dm the timing_changed flag is set but never used so can remove it. Similarly, the stream update for VRR is done after dc_commit and should not update its adjust field until after the update is completed. The adjust field is managed by dc_stream_adjust_vmin_vmax and should not be manually updated in amdgpu_dm. 
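The skip-if-unchanged logic amounts to an early return when the requested timing adjust is byte-identical to what the stream already carries; the real check lands in dc_stream_adjust_vmin_vmax() in the diff below. Purely as an illustration of the pattern (the structs here are simplified stand-ins, not the DC types):

#include <stdbool.h>
#include <string.h>

/* Simplified stand-in for struct dc_crtc_timing_adjust. */
struct timing_adjust {
	unsigned int v_total_min;
	unsigned int v_total_max;
};

struct stream {
	struct timing_adjust adjust;
};

static bool adjust_vmin_vmax(struct stream *stream,
			     const struct timing_adjust *adjust)
{
	/* Requested parameters match the programmed ones: skip the
	 * comparatively expensive DRR reprogramming entirely.
	 */
	if (memcmp(adjust, &stream->adjust, sizeof(*adjust)) == 0)
		return true;

	stream->adjust = *adjust;
	/* ... program DRR/vmin/vmax hardware state here ... */
	return true;
}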
Tested-by: Daniel Wheeler Reviewed-by: Anthony Koo Acked-by: Pavle Kotarac Signed-off-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ---------- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1 - drivers/gpu/drm/amd/display/dc/core/dc.c | 3 +++ 3 files changed, 3 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7a93162633ae..819d61f2307b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7370,11 +7370,6 @@ static void update_freesync_state_on_stream( &vrr_infopacket, pack_sdp_v1_3); - new_crtc_state->freesync_timing_changed |= - (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, - &vrr_params.adjust, - sizeof(vrr_params.adjust)) != 0); - new_crtc_state->freesync_vrr_info_changed |= (memcmp(&new_crtc_state->vrr_infopacket, &vrr_infopacket, @@ -7383,7 +7378,6 @@ static void update_freesync_state_on_stream( acrtc->dm_irq_params.vrr_params = vrr_params; new_crtc_state->vrr_infopacket = vrr_infopacket; - new_stream->adjust = acrtc->dm_irq_params.vrr_params.adjust; new_stream->vrr_infopacket = vrr_infopacket; if (new_crtc_state->freesync_vrr_info_changed) @@ -7446,10 +7440,6 @@ static void update_stream_irq_parameters( new_stream, &config, &vrr_params); - new_crtc_state->freesync_timing_changed |= - (memcmp(&acrtc->dm_irq_params.vrr_params.adjust, - &vrr_params.adjust, sizeof(vrr_params.adjust)) != 0); - new_crtc_state->freesync_config = config; /* Copy state for access from DM IRQ handler */ acrtc->dm_irq_params.freesync_config = config; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index b44faaad9b0b..b5ce15c43bcc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -681,7 +681,6 @@ struct dm_crtc_state { int crc_skip_count; - bool freesync_timing_changed; bool freesync_vrr_info_changed; bool dsc_force_changed; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 9860bf38c547..7481801c6d7c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -401,6 +401,9 @@ bool dc_stream_adjust_vmin_vmax(struct dc *dc, { int i; + if (memcmp(adjust, &stream->adjust, sizeof(struct dc_crtc_timing_adjust)) == 0) + return true; + stream->adjust.v_total_max = adjust->v_total_max; stream->adjust.v_total_mid = adjust->v_total_mid; stream->adjust.v_total_mid_frame_num = adjust->v_total_mid_frame_num; -- cgit v1.2.3-59-g8ed1b From 7136f956c73c4ba50bfeb61653dfd6a9669ea915 Mon Sep 17 00:00:00 2001 From: Rafael Mendonca Date: Mon, 12 Sep 2022 19:34:32 -0300 Subject: drm/amdgpu: Fix memory leak in hpd_rx_irq_create_workqueue() If construction of the array of work queues to handle hpd_rx_irq offload work fails, we need to unwind. Destroy all the created workqueues and the allocated memory for the hpd_rx_irq_offload_work_queue struct array. 
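The unwind described above follows the usual partial-construction cleanup pattern: on the first failure, destroy everything created so far, then free the backing array. A standalone sketch of that pattern (names and types are simplified stand-ins for the driver's workqueue bookkeeping, not the actual kernel API):

#include <stdlib.h>

struct offload_wq {
	void *wq;	/* stand-in for struct workqueue_struct * */
};

/* Stand-ins for workqueue creation/destruction. */
static void *create_wq(void) { return malloc(16); }
static void destroy_wq(void *wq) { free(wq); }

static struct offload_wq *create_offload_queues(int max_caps)
{
	struct offload_wq *arr = calloc(max_caps, sizeof(*arr));
	int i;

	if (!arr)
		return NULL;

	for (i = 0; i < max_caps; i++) {
		arr[i].wq = create_wq();
		if (!arr[i].wq)
			goto out_err;	/* unwind instead of leaking */
	}

	return arr;

out_err:
	/* Destroy every queue that was created before the failure... */
	for (i = 0; i < max_caps; i++) {
		if (arr[i].wq)
			destroy_wq(arr[i].wq);
	}
	/* ...and the array itself. */
	free(arr);
	return NULL;
}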
Fixes: 8e794421bc98 ("drm/amd/display: Fork thread to offload work of hpd_rx_irq") Signed-off-by: Rafael Mendonca Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 819d61f2307b..4d92e55b4f64 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1296,13 +1296,21 @@ static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct if (hpd_rx_offload_wq[i].wq == NULL) { DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); - return NULL; + goto out_err; } spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); } return hpd_rx_offload_wq; + +out_err: + for (i = 0; i < max_caps; i++) { + if (hpd_rx_offload_wq[i].wq) + destroy_workqueue(hpd_rx_offload_wq[i].wq); + } + kfree(hpd_rx_offload_wq); + return NULL; } struct amdgpu_stutter_quirk { -- cgit v1.2.3-59-g8ed1b From ee10818362d81ca1796cacdfb8a6c6f857cdb34a Mon Sep 17 00:00:00 2001 From: Xu Panda Date: Mon, 12 Sep 2022 03:22:42 +0000 Subject: drm/amd/display/amdgpu_dm: remove duplicate included header files soc15_common.h is included more than once. Reported-by: Zeal Robot Signed-off-by: Xu Panda Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4d92e55b4f64..5da2a1d97b81 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -98,8 +98,6 @@ #include "soc15_common.h" #include "vega10_ip_offset.h" -#include "soc15_common.h" - #include "gc/gc_11_0_0_offset.h" #include "gc/gc_11_0_0_sh_mask.h" -- cgit v1.2.3-59-g8ed1b From 22c42b0ec225c92db33e4b3045ad15baf1427cff Mon Sep 17 00:00:00 2001 From: Leo Li Date: Tue, 30 Aug 2022 16:38:16 -0400 Subject: drm/amd/display: Fix double cursor on non-video RGB MPO [Why] DC makes use of layer_index (zpos) when picking the HW plane to enable HW cursor on. However, some compositors will not attach zpos information to each DRM plane. Consequently, in amdgpu, we default layer_index to 0 and do not update it. This causes said DC logic to enable HW cursor on all planes of the same layer_index, which manifests as a double cursor issue if one of the planes is scaled (and hence scaling the cursor as well). [How] Use DRM core helpers to calculate a normalized_zpos value for each drm_plane_state under each crtc, within the atomic state. This helper will first consider existing zpos values, and if identical/unset, fallback to plane ID ordering. The normalized_zpos is then passed to dc_plane_info during atomic check for later use by the cursor logic. 
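In DRM-core terms the flow is: normalize zpos once for the whole atomic state, then read the per-plane result. A condensed sketch of that flow — not amdgpu_dm's actual atomic_check, and use_layer_index() is a placeholder for handing the value to the hardware-specific layer:

#include <drm/drm_atomic.h>
#include <drm/drm_blend.h>

/* drm_atomic_normalize_zpos() fills plane_state->normalized_zpos for
 * every plane in the state, preferring user-supplied zpos and falling
 * back to plane ID ordering when zpos values are unset or identical.
 */
static int example_atomic_check(struct drm_device *dev,
				struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int ret, i;

	ret = drm_atomic_normalize_zpos(dev, state);
	if (ret)
		return ret;

	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		/* In amdgpu_dm this value ends up in
		 * dc_plane_info.layer_index (see the diff below).
		 */
		use_layer_index(new_plane_state->normalized_zpos);
	}

	return 0;
}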
Reviewed-by: Bhawanpreet Lakha Acked-by: Wayne Lin Signed-off-by: Leo Li Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 5da2a1d97b81..e53562d9362b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -4739,7 +4739,7 @@ fill_dc_plane_info_and_addr(struct amdgpu_device *adev, plane_info->visible = true; plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE; - plane_info->layer_index = 0; + plane_info->layer_index = plane_state->normalized_zpos; ret = fill_plane_color_attributes(plane_state, plane_info->format, &plane_info->color_space); @@ -4807,7 +4807,7 @@ static int fill_dc_plane_attributes(struct amdgpu_device *adev, dc_plane_state->global_alpha = plane_info.global_alpha; dc_plane_state->global_alpha_value = plane_info.global_alpha_value; dc_plane_state->dcc = plane_info.dcc; - dc_plane_state->layer_index = plane_info.layer_index; // Always returns 0 + dc_plane_state->layer_index = plane_info.layer_index; dc_plane_state->flip_int_enabled = true; /* @@ -9460,6 +9460,14 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } } + /* + * DC consults the zpos (layer_index in DC terminology) to determine the + * hw plane on which to enable the hw cursor (see + * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in + * atomic state, so call drm helper to normalize zpos. + */ + drm_atomic_normalize_zpos(dev, state); + /* Remove exiting planes if they are modified */ for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { ret = dm_update_plane_state(dc, state, plane, -- cgit v1.2.3-59-g8ed1b From f6aa84b83aee629fbbbc4ea16c2c142caf920d5a Mon Sep 17 00:00:00 2001 From: Roman Li Date: Thu, 29 Sep 2022 14:37:00 -0400 Subject: drm/amd/display: Enable dpia support for dcn314 [Why] DCN 3.1.4 supports DPIA. [How] - Set dpia_supported flag for dcn314 in dmub_hw_init() - Remove comment that becomes irrelevant after this change. 
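The change itself is a one-case addition to the IP-version switch in dm_dmub_hw_init(), as the diff below shows; schematically, the capability is simply gated on the display-core hardware revision (a rough sketch of that switch, not the exact driver code):

	switch (adev->ip_versions[DCE_HWIP][0]) {
	case IP_VERSION(3, 1, 3):
	case IP_VERSION(3, 1, 4):	/* DCN 3.1.4 also supports DPIA */
		hw_params.dpia_supported = true;
		hw_params.disable_dpia =
			adev->dm.dc->debug.dpia_debug.bits.disable_dpia;
		break;
	default:
		break;
	}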
Signed-off-by: Roman Li Reviewed-by: Nicholas Kazlauskas Reviewed-by: Mario Limonciello Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org # 6.0.x --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 4c73727e0b7d..d5222d5e3a61 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1110,7 +1110,8 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) hw_params.fb[i] = &fb_info->fb[i]; switch (adev->ip_versions[DCE_HWIP][0]) { - case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */ + case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): hw_params.dpia_supported = true; hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; break; -- cgit v1.2.3-59-g8ed1b From 8799c0be89ebb99a16098bdf618f49f817bef76a Mon Sep 17 00:00:00 2001 From: Yunxiang Li Date: Wed, 21 Sep 2022 17:20:19 -0400 Subject: drm/amd/display: Fix vblank refcount in vrr transition manage_dm_interrupts disable/enable vblank using drm_crtc_vblank_off/on which causes drm_crtc_vblank_get in vrr_transition to fail, and later when drm_crtc_vblank_put is called the refcount on vblank will be messed up. Therefore move the call to after manage_dm_interrupts. Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1247 Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1380 Tested-by: Daniel Wheeler Reviewed-by: Rodrigo Siqueira Signed-off-by: Yunxiang Li Signed-off-by: Rodrigo Siqueira Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 55 +++++++++++------------ 1 file changed, 26 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index d5222d5e3a61..b84aedb707b8 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7479,15 +7479,15 @@ static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, * We also need vupdate irq for the actual core vblank handling * at end of vblank. */ - dm_set_vupdate_irq(new_state->base.crtc, true); - drm_crtc_vblank_get(new_state->base.crtc); + WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, true) != 0); + WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", __func__, new_state->base.crtc->base.id); } else if (old_vrr_active && !new_vrr_active) { /* Transition VRR active -> inactive: * Allow vblank irq disable again for fixed refresh rate. */ - dm_set_vupdate_irq(new_state->base.crtc, false); + WARN_ON(dm_set_vupdate_irq(new_state->base.crtc, false) != 0); drm_crtc_vblank_put(new_state->base.crtc); DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", __func__, new_state->base.crtc->base.id); @@ -8243,23 +8243,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) mutex_unlock(&dm->dc_lock); } - /* Count number of newly disabled CRTCs for dropping PM refs later. 
*/ - for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, - new_crtc_state, i) { - if (old_crtc_state->active && !new_crtc_state->active) - crtc_disable_count++; - - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); - - /* For freesync config update on crtc state and params for irq */ - update_stream_irq_parameters(dm, dm_new_crtc_state); - - /* Handle vrr on->off / off->on transitions */ - amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, - dm_new_crtc_state); - } - /** * Enable interrupts for CRTCs that are newly enabled or went through * a modeset. It was intentionally deferred until after the front end @@ -8269,16 +8252,29 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); #ifdef CONFIG_DEBUG_FS - bool configure_crc = false; enum amdgpu_dm_pipe_crc_source cur_crc_src; #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) - struct crc_rd_work *crc_rd_wrk = dm->crc_rd_wrk; + struct crc_rd_work *crc_rd_wrk; +#endif +#endif + /* Count number of newly disabled CRTCs for dropping PM refs later. */ + if (old_crtc_state->active && !new_crtc_state->active) + crtc_disable_count++; + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + /* For freesync config update on crtc state and params for irq */ + update_stream_irq_parameters(dm, dm_new_crtc_state); + +#ifdef CONFIG_DEBUG_FS +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + crc_rd_wrk = dm->crc_rd_wrk; #endif spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); cur_crc_src = acrtc->dm_irq_params.crc_src; spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); #endif - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); if (new_crtc_state->active && (!old_crtc_state->active || @@ -8286,16 +8282,19 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) dc_stream_retain(dm_new_crtc_state->stream); acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; manage_dm_interrupts(adev, acrtc, true); + } + /* Handle vrr on->off / off->on transitions */ + amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); #ifdef CONFIG_DEBUG_FS + if (new_crtc_state->active && + (!old_crtc_state->active || + drm_atomic_crtc_needs_modeset(new_crtc_state))) { /** * Frontend may have changed so reapply the CRC capture * settings for the stream. */ - dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); - if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { - configure_crc = true; #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) if (amdgpu_dm_crc_window_is_activated(crtc)) { spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); @@ -8307,12 +8306,10 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); } #endif - } - - if (configure_crc) if (amdgpu_dm_crtc_configure_crc_source( crtc, dm_new_crtc_state, cur_crc_src)) DRM_DEBUG_DRIVER("Failed to configure crc source"); + } #endif } } -- cgit v1.2.3-59-g8ed1b From 876fcc4222e1d0e5b73343f4010a8b66be058f48 Mon Sep 17 00:00:00 2001 From: Fangzhi Zuo Date: Tue, 30 Aug 2022 12:12:53 -0400 Subject: drm/amd/display: Validate DSC After Enable All New CRTCs Before enabling new crtc, stream_count in dc_state does not sync with that in drm_atomic_state. 
Validating dsc in such case would leave newly added stream not jointly participating in dsc optimization with existing streams, but simply using default initialized vcpi all the time which gives wrong dsc determination decision. Consider the scenaio where one 4k60 connected to the dock under dp-alt mode. Since dp-alt mode is 2-lane setup, stream 1 consumes 63 slots with dsc needed. Then hook up a second 4k60 to the dock. stream 2 connected with 65 slot initialized by default without dsc. dsc pre validate will not jointly optimize stream 2 with stream 1 before crtc 2 added into the dc_state. That leads to stream 2 not getting dsc optimization, and trigger atomic_check failure all the time, as 65 > 63 limit. After getting all new crtcs added into the state, stream_count in dc_state correctly reflect that in drm_atomic_state which comes up with correct dsc decision. Fixes: 71be4b16d39a ("drm/amd/display: dsc validate fail not pass to atomic check") Tested-by: Daniel Wheeler Reviewed-by: Roman Li Acked-by: Qingqing Zhuo Signed-off-by: Fangzhi Zuo Tested-by: Mark Broadworth Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index b84aedb707b8..f6a9e8fdd87d 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9390,10 +9390,6 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } } } - if (!pre_validate_dsc(state, &dm_state, vars)) { - ret = -EINVAL; - goto fail; - } } #endif for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { @@ -9527,6 +9523,15 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev, } } +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dc_resource_is_dsc_encoding_supported(dc)) { + if (!pre_validate_dsc(state, &dm_state, vars)) { + ret = -EINVAL; + goto fail; + } + } +#endif + /* Run this here since we want to validate the streams we created */ ret = drm_atomic_helper_check_planes(dev, state); if (ret) { -- cgit v1.2.3-59-g8ed1b From 2130b87b2273389cafe6765bf09ef564cda01407 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Fri, 14 Oct 2022 08:21:03 -0700 Subject: drm/amd/display: Fix build breakage with CONFIG_DEBUG_FS=n MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After commit 8799c0be89eb ("drm/amd/display: Fix vblank refcount in vrr transition"), a build with CONFIG_DEBUG_FS=n is broken due to a misplaced brace, along the lines of: In file included from drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm_trace.h:39, from drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:41: drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c: At top level: ./include/drm/drm_atomic.h:864:9: error: expected identifier or ‘(’ before ‘for’ 864 | for ((__i) = 0; \ | ^~~ drivers/gpu/drm/amd/amdgpu/../display/amdgpu_dm/amdgpu_dm.c:8317:9: note: in expansion of macro ‘for_each_new_crtc_in_state’ 8317 | for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) | ^~~~~~~~~~~~~~~~~~~~~~~~~~ Move the brace within the #ifdef so that the file can be built with or without CONFIG_DEBUG_FS. 
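The breakage class is easy to reproduce in isolation: if a brace that closes an unconditionally compiled block sits inside an #ifdef, the CONFIG_DEBUG_FS=n build ends up unbalanced. A tiny standalone illustration (CONFIG_DEBUG_FS is mimicked with a plain 0/1 macro here; this is not the driver's code):

#include <stdio.h>

#define DEBUG_FS 0	/* flip to 1 to mimic CONFIG_DEBUG_FS=y */

static void commit_tail(int ncrtcs)
{
	for (int i = 0; i < ncrtcs; i++) {
		printf("commit crtc %d\n", i);
#if DEBUG_FS
		printf("configure CRC source for crtc %d\n", i);
#endif
	}	/* closing brace outside the #if: balanced in both configs */
}

int main(void)
{
	commit_tail(2);
	return 0;
}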
Fixes: 8799c0be89eb ("drm/amd/display: Fix vblank refcount in vrr transition") Signed-off-by: Nathan Chancellor Signed-off-by: Linus Torvalds --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c') diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index f6a9e8fdd87d..c053cb79cd06 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8310,8 +8310,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) crtc, dm_new_crtc_state, cur_crc_src)) DRM_DEBUG_DRIVER("Failed to configure crc source"); } -#endif } +#endif } for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) -- cgit v1.2.3-59-g8ed1b