author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
commit     638a9e433ecd61e64761352dbec1fa4f5874c941 (patch)
tree       fdbff74a238d7a5a7d1cef071b7230bc064b9f25 /drivers/gpu/drm/vmwgfx
parent     Releasing progress-linux version 6.9.12-1~progress7.99u1. (diff)
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/vmwgfx')
22 files changed, 870 insertions, 310 deletions
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile index e94479d9cd..46a4ab688a 100644 --- a/drivers/gpu/drm/vmwgfx/Makefile +++ b/drivers/gpu/drm/vmwgfx/Makefile @@ -10,6 +10,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \ vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \ vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \ vmwgfx_devcaps.o ttm_object.o vmwgfx_system_manager.o \ - vmwgfx_gem.o + vmwgfx_gem.o vmwgfx_vkms.o obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c index 6806c05e57..3353e97687 100644 --- a/drivers/gpu/drm/vmwgfx/ttm_object.c +++ b/drivers/gpu/drm/vmwgfx/ttm_object.c @@ -87,14 +87,11 @@ struct ttm_object_file { * * @object_lock: lock that protects idr. * - * @object_count: Per device object count. - * * This is the per-device data structure needed for ttm object management. */ struct ttm_object_device { spinlock_t object_lock; - atomic_t object_count; struct dma_buf_ops ops; void (*dmabuf_release)(struct dma_buf *dma_buf); struct idr idr; @@ -431,7 +428,6 @@ ttm_object_device_init(const struct dma_buf_ops *ops) return NULL; spin_lock_init(&tdev->object_lock); - atomic_set(&tdev->object_count, 0); /* * Our base is at VMWGFX_NUM_MOB + 1 because we want to create diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c index ae2de914eb..2731f6ded1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c @@ -54,6 +54,7 @@ #include "vmwgfx_drv.h" #include "vmwgfx_binding.h" #include "device_include/svga3d_reg.h" +#include <linux/vmalloc.h> #define VMW_BINDING_RT_BIT 0 #define VMW_BINDING_PS_BIT 1 diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c index 195ff8792e..dd4ca6a9c6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c @@ -31,6 +31,7 @@ #include <drm/ttm/ttm_placement.h> #include <linux/sched/signal.h> +#include <linux/vmalloc.h> bool vmw_supports_3d(struct vmw_private *dev_priv) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c index 829df395c2..6e6beff9e2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_devcaps.c @@ -25,6 +25,7 @@ * **************************************************************************/ +#include <linux/vmalloc.h> #include "vmwgfx_devcaps.h" #include "vmwgfx_drv.h" diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index bea576434e..823d8d2da1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -32,6 +32,7 @@ #include "vmwgfx_binding.h" #include "vmwgfx_devcaps.h" #include "vmwgfx_mksstat.h" +#include "vmwgfx_vkms.h" #include "ttm_object.h" #include <drm/drm_aperture.h> @@ -53,6 +54,7 @@ #include <linux/module.h> #include <linux/pci.h> #include <linux/version.h> +#include <linux/vmalloc.h> #define VMWGFX_DRIVER_DESC "Linux drm driver for VMware graphics devices" @@ -744,7 +746,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, dev->vram_size = pci_resource_len(pdev, 2); drm_info(&dev->drm, - "Register MMIO at 0x%pa size is %llu kiB\n", + "Register MMIO at 0x%pa size is %llu KiB\n", &rmmio_start, (uint64_t)rmmio_size / 1024); dev->rmmio = devm_ioremap(dev->drm.dev, rmmio_start, @@ -763,7 +765,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, fifo_size = 
pci_resource_len(pdev, 2); drm_info(&dev->drm, - "FIFO at %pa size is %llu kiB\n", + "FIFO at %pa size is %llu KiB\n", &fifo_start, (uint64_t)fifo_size / 1024); dev->fifo_mem = devm_memremap(dev->drm.dev, fifo_start, @@ -788,7 +790,7 @@ static int vmw_setup_pci_resources(struct vmw_private *dev, * SVGA_REG_VRAM_SIZE. */ drm_info(&dev->drm, - "VRAM at %pa size is %llu kiB\n", + "VRAM at %pa size is %llu KiB\n", &dev->vram_start, (uint64_t)dev->vram_size / 1024); return 0; @@ -911,6 +913,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) "Please switch to a supported graphics device to avoid problems."); } + vmw_vkms_init(dev_priv); + ret = vmw_dma_select_mode(dev_priv); if (unlikely(ret != 0)) { drm_info(&dev_priv->drm, @@ -980,13 +984,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) dev_priv->max_primary_mem = dev_priv->vram_size; } drm_info(&dev_priv->drm, - "Legacy memory limits: VRAM = %llu kB, FIFO = %llu kB, surface = %u kB\n", + "Legacy memory limits: VRAM = %llu KiB, FIFO = %llu KiB, surface = %u KiB\n", (u64)dev_priv->vram_size / 1024, (u64)dev_priv->fifo_mem_size / 1024, dev_priv->memory_size / 1024); drm_info(&dev_priv->drm, - "MOB limits: max mob size = %u kB, max mob pages = %u\n", + "MOB limits: max mob size = %u KiB, max mob pages = %u\n", dev_priv->max_mob_size / 1024, dev_priv->max_mob_pages); ret = vmw_dma_masks(dev_priv); @@ -1004,7 +1008,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id) (unsigned)dev_priv->max_gmr_pages); } drm_info(&dev_priv->drm, - "Maximum display memory size is %llu kiB\n", + "Maximum display memory size is %llu KiB\n", (uint64_t)dev_priv->max_primary_mem / 1024); /* Need mmio memory to check for fifo pitchlock cap. */ @@ -1189,6 +1193,7 @@ static void vmw_driver_unload(struct drm_device *dev) vmw_svga_disable(dev_priv); + vmw_vkms_cleanup(dev_priv); vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h index c1430e5547..a1ce41e1c4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h @@ -117,25 +117,8 @@ struct vmwgfx_hash_item { unsigned long key; }; - -/** - * struct vmw_validate_buffer - Carries validation info about buffers. - * - * @base: Validation info for TTM. - * @hash: Hash entry for quick lookup of the TTM buffer object. - * - * This structure contains also driver private validation info - * on top of the info needed by TTM. 
- */ -struct vmw_validate_buffer { - struct ttm_validate_buffer base; - struct vmwgfx_hash_item hash; - bool validate_as_mob; -}; - struct vmw_res_func; - /** * struct vmw-resource - base class for hardware resources * @@ -445,15 +428,6 @@ struct vmw_sw_context{ struct vmw_legacy_display; struct vmw_overlay; -struct vmw_vga_topology_state { - uint32_t width; - uint32_t height; - uint32_t primary; - uint32_t pos_x; - uint32_t pos_y; -}; - - /* * struct vmw_otable - Guest Memory OBject table metadata * @@ -501,7 +475,6 @@ struct vmw_private { struct drm_device drm; struct ttm_device bdev; - struct drm_vma_offset_manager vma_manager; u32 pci_id; resource_size_t io_start; resource_size_t vram_start; @@ -642,6 +615,9 @@ struct vmw_private { uint32 *devcaps; + bool vkms_enabled; + struct workqueue_struct *crc_workq; + /* * mksGuestStat instance-descriptor and pid arrays */ @@ -836,6 +812,7 @@ void vmw_resource_mob_attach(struct vmw_resource *res); void vmw_resource_mob_detach(struct vmw_resource *res); void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, pgoff_t end); +int vmw_resource_clean(struct vmw_resource *res); int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start, pgoff_t end, pgoff_t *num_prefault); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index cc3086e649..2e52d73eba 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -35,6 +35,7 @@ #include <linux/sync_file.h> #include <linux/hashtable.h> +#include <linux/vmalloc.h> /* * Helper macro to get dx_ctx_node if available otherwise print an error diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c index d6bcaf078b..07185c1082 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gem.c @@ -30,6 +30,8 @@ #include "drm/drm_prime.h" #include "drm/drm_gem_ttm_helper.h" +#include <linux/debugfs.h> + static void vmw_gem_object_free(struct drm_gem_object *gobj) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gobj); @@ -48,33 +50,20 @@ static void vmw_gem_object_close(struct drm_gem_object *obj, { } -static int vmw_gem_pin_private(struct drm_gem_object *obj, bool do_pin) +static int vmw_gem_object_pin(struct drm_gem_object *obj) { - struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(obj); struct vmw_bo *vbo = to_vmw_bo(obj); - int ret; - ret = ttm_bo_reserve(bo, false, false, NULL); - if (unlikely(ret != 0)) - goto err; + vmw_bo_pin_reserved(vbo, true); - vmw_bo_pin_reserved(vbo, do_pin); - - ttm_bo_unreserve(bo); - -err: - return ret; -} - - -static int vmw_gem_object_pin(struct drm_gem_object *obj) -{ - return vmw_gem_pin_private(obj, true); + return 0; } static void vmw_gem_object_unpin(struct drm_gem_object *obj) { - vmw_gem_pin_private(obj, false); + struct vmw_bo *vbo = to_vmw_bo(obj); + + vmw_bo_pin_reserved(vbo, false); } static struct sg_table *vmw_gem_object_get_sg_table(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index a0b47c9b33..5bd967fbcf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -94,14 +94,14 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, } else new_max_pages = gman->max_gmr_pages * 2; if (new_max_pages > gman->max_gmr_pages && new_max_pages >= gman->used_gmr_pages) { - DRM_WARN("vmwgfx: increasing guest mob limits to %u kB.\n", + DRM_WARN("vmwgfx: increasing 
guest mob limits to %u KiB.\n", ((new_max_pages) << (PAGE_SHIFT - 10))); gman->max_gmr_pages = new_max_pages; } else { char buf[256]; snprintf(buf, sizeof(buf), - "vmwgfx, error: guest graphics is out of memory (mob limit at: %ukB).\n", + "vmwgfx, error: guest graphics is out of memory (mob limit at: %u KiB).\n", ((gman->max_gmr_pages) << (PAGE_SHIFT - 10))); vmw_host_printf(buf); DRM_WARN("%s", buf); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c index a1da5678c7..835d1eed8d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c @@ -31,6 +31,7 @@ #include <drm/vmwgfx_drm.h> #include <linux/pci.h> +#include <linux/vmalloc.h> int vmw_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 11755d143e..00c4ff6841 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -27,6 +27,7 @@ #include "vmwgfx_kms.h" #include "vmwgfx_bo.h" +#include "vmwgfx_vkms.h" #include "vmw_surface_cache.h" #include <drm/drm_atomic.h> @@ -37,9 +38,16 @@ #include <drm/drm_sysfs.h> #include <drm/drm_edid.h> +void vmw_du_init(struct vmw_display_unit *du) +{ + vmw_vkms_crtc_init(&du->crtc); +} + void vmw_du_cleanup(struct vmw_display_unit *du) { struct vmw_private *dev_priv = vmw_priv(du->primary.dev); + + vmw_vkms_crtc_cleanup(&du->crtc); drm_plane_cleanup(&du->primary); if (vmw_cmd_supported(dev_priv)) drm_plane_cleanup(&du->cursor.base); @@ -775,7 +783,6 @@ vmw_du_cursor_plane_atomic_update(struct drm_plane *plane, hotspot_y = du->hotspot_y + new_state->hotspot_y; du->cursor_surface = vps->surf; - du->cursor_bo = vps->bo; if (!vps->surf && !vps->bo) { vmw_cursor_update_position(dev_priv, false, 0, 0); @@ -858,15 +865,6 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane, DRM_PLANE_NO_SCALING, DRM_PLANE_NO_SCALING, false, true); - - if (!ret && new_fb) { - struct drm_crtc *crtc = new_state->crtc; - struct vmw_display_unit *du = vmw_crtc_to_du(crtc); - - vmw_connector_state_to_vcs(du->connector.state); - } - - return ret; } @@ -970,15 +968,9 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state) { + vmw_vkms_crtc_atomic_begin(crtc, state); } - -void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ -} - - /** * vmw_du_crtc_duplicate_state - duplicate crtc state * @crtc: DRM crtc @@ -1366,7 +1358,6 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv, drm_helper_mode_fill_fb_struct(dev, &vfbs->base.base, mode_cmd); vfbs->surface = vmw_surface_reference(surface); - vfbs->base.user_handle = mode_cmd->handles[0]; vfbs->is_bo_proxy = is_bo_proxy; *out = &vfbs->base; @@ -1534,7 +1525,6 @@ static int vmw_kms_new_framebuffer_bo(struct vmw_private *dev_priv, drm_helper_mode_fill_fb_struct(dev, &vfbd->base.base, mode_cmd); vfbd->base.bo = true; vfbd->buffer = vmw_bo_reference(bo); - vfbd->base.user_handle = mode_cmd->handles[0]; *out = &vfbd->base; ret = drm_framebuffer_init(dev, &vfbd->base.base, @@ -2045,6 +2035,29 @@ vmw_kms_create_hotplug_mode_update_property(struct vmw_private *dev_priv) "hotplug_mode_update", 0, 1); } +static void +vmw_atomic_commit_tail(struct drm_atomic_state *old_state) +{ + struct vmw_private *vmw = vmw_priv(old_state->dev); + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state; + int i; + + 
drm_atomic_helper_commit_tail(old_state); + + if (vmw->vkms_enabled) { + for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i) { + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + (void)old_crtc_state; + flush_work(&du->vkms.crc_generator_work); + } + } +} + +static const struct drm_mode_config_helper_funcs vmw_mode_config_helpers = { + .atomic_commit_tail = vmw_atomic_commit_tail, +}; + int vmw_kms_init(struct vmw_private *dev_priv) { struct drm_device *dev = &dev_priv->drm; @@ -2064,6 +2077,7 @@ int vmw_kms_init(struct vmw_private *dev_priv) dev->mode_config.max_width = dev_priv->texture_max_width; dev->mode_config.max_height = dev_priv->texture_max_height; dev->mode_config.preferred_depth = dev_priv->assume_16bpp ? 16 : 32; + dev->mode_config.helper_private = &vmw_mode_config_helpers; drm_mode_create_suggested_offset_properties(dev); vmw_kms_create_hotplug_mode_update_property(dev_priv); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h index 19a843da87..bf24f2f0dc 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.h @@ -217,21 +217,11 @@ struct vmw_kms_dirty { struct vmw_framebuffer { struct drm_framebuffer base; bool bo; - uint32_t user_handle; -}; - -/* - * Clip rectangle - */ -struct vmw_clip_rect { - int x1, x2, y1, y2; }; struct vmw_framebuffer_surface { struct vmw_framebuffer base; struct vmw_surface *surface; - struct vmw_bo *buffer; - struct list_head head; bool is_bo_proxy; /* true if this is proxy surface for DMA buf */ }; @@ -359,7 +349,6 @@ struct vmw_display_unit { struct vmw_cursor_plane cursor; struct vmw_surface *cursor_surface; - struct vmw_bo *cursor_bo; size_t cursor_age; int cursor_x; @@ -387,11 +376,25 @@ struct vmw_display_unit { bool is_implicit; int set_gui_x; int set_gui_y; -}; -struct vmw_validation_ctx { - struct vmw_resource *res; - struct vmw_bo *buf; + struct { + struct work_struct crc_generator_work; + struct hrtimer timer; + ktime_t period_ns; + + /* protects concurrent access to the vblank handler */ + atomic_t atomic_lock; + /* protected by @atomic_lock */ + bool crc_enabled; + struct vmw_surface *surface; + + /* protects concurrent access to the crc worker */ + spinlock_t crc_state_lock; + /* protected by @crc_state_lock */ + bool crc_pending; + u64 frame_start; + u64 frame_end; + } vkms; }; #define vmw_crtc_to_du(x) \ @@ -403,6 +406,7 @@ struct vmw_validation_ctx { /* * Shared display unit functions - vmwgfx_kms.c */ +void vmw_du_init(struct vmw_display_unit *du); void vmw_du_cleanup(struct vmw_display_unit *du); void vmw_du_crtc_save(struct drm_crtc *crtc); void vmw_du_crtc_restore(struct drm_crtc *crtc); @@ -489,8 +493,6 @@ int vmw_du_crtc_atomic_check(struct drm_crtc *crtc, struct drm_atomic_state *state); void vmw_du_crtc_atomic_begin(struct drm_crtc *crtc, struct drm_atomic_state *state); -void vmw_du_crtc_atomic_flush(struct drm_crtc *crtc, - struct drm_atomic_state *state); void vmw_du_crtc_reset(struct drm_crtc *crtc); struct drm_crtc_state *vmw_du_crtc_duplicate_state(struct drm_crtc *crtc); void vmw_du_crtc_destroy_state(struct drm_crtc *crtc, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c index c4db4aecca..5befc2719a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c @@ -27,6 +27,7 @@ #include "vmwgfx_bo.h" #include "vmwgfx_kms.h" +#include "vmwgfx_vkms.h" #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> @@ -241,33 +242,6 @@ static void 
vmw_ldu_crtc_mode_set_nofb(struct drm_crtc *crtc) { } -/** - * vmw_ldu_crtc_atomic_enable - Noop - * - * @crtc: CRTC associated with the new screen - * @state: Unused - * - * This is called after a mode set has been completed. Here's - * usually a good place to call vmw_ldu_add_active/vmw_ldu_del_active - * but since for LDU the display plane is closely tied to the - * CRTC, it makes more sense to do those at plane update time. - */ -static void vmw_ldu_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ -} - -/** - * vmw_ldu_crtc_atomic_disable - Turns off CRTC - * - * @crtc: CRTC to be turned off - * @state: Unused - */ -static void vmw_ldu_crtc_atomic_disable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ -} - static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = { .gamma_set = vmw_du_crtc_gamma_set, .destroy = vmw_ldu_crtc_destroy, @@ -276,6 +250,9 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = { .atomic_destroy_state = vmw_du_crtc_destroy_state, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, + .enable_vblank = vmw_vkms_enable_vblank, + .disable_vblank = vmw_vkms_disable_vblank, + .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp, }; @@ -418,9 +395,9 @@ static const struct drm_crtc_helper_funcs vmw_ldu_crtc_helper_funcs = { .mode_set_nofb = vmw_ldu_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, - .atomic_flush = vmw_du_crtc_atomic_flush, - .atomic_enable = vmw_ldu_crtc_atomic_enable, - .atomic_disable = vmw_ldu_crtc_atomic_disable, + .atomic_flush = vmw_vkms_crtc_atomic_flush, + .atomic_enable = vmw_vkms_crtc_atomic_enable, + .atomic_disable = vmw_vkms_crtc_atomic_disable, }; @@ -541,6 +518,8 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit) dev_priv->implicit_placement_property, 1); + vmw_du_init(&ldu->base); + return 0; err_free_unregister: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c index ca300c7427..848dba0998 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c @@ -1064,6 +1064,22 @@ void vmw_resource_dirty_update(struct vmw_resource *res, pgoff_t start, end << PAGE_SHIFT); } +int vmw_resource_clean(struct vmw_resource *res) +{ + int ret = 0; + + if (res->res_dirty) { + if (!res->func->clean) + return -EINVAL; + + ret = res->func->clean(res); + if (ret) + return ret; + res->res_dirty = false; + } + return ret; +} + /** * vmw_resources_clean - Clean resources intersecting a mob range * @vbo: The mob buffer object @@ -1080,6 +1096,7 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start, unsigned long res_start = start << PAGE_SHIFT; unsigned long res_end = end << PAGE_SHIFT; unsigned long last_cleaned = 0; + int ret; /* * Find the resource with lowest backup_offset that intersects the @@ -1106,18 +1123,9 @@ int vmw_resources_clean(struct vmw_bo *vbo, pgoff_t start, * intersecting the range. 
*/ while (found) { - if (found->res_dirty) { - int ret; - - if (!found->func->clean) - return -EINVAL; - - ret = found->func->clean(found); - if (ret) - return ret; - - found->res_dirty = false; - } + ret = vmw_resource_clean(found); + if (ret) + return ret; last_cleaned = found->guest_memory_offset + found->guest_memory_size; cur = rb_next(&found->mob_node); if (!cur) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c index 30c3ad27b6..df0039a8ef 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c @@ -27,11 +27,13 @@ #include "vmwgfx_bo.h" #include "vmwgfx_kms.h" +#include "vmwgfx_vkms.h" #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_vblank.h> #define vmw_crtc_to_sou(x) \ container_of(x, struct vmw_screen_object_unit, base.crtc) @@ -89,7 +91,6 @@ struct vmw_kms_sou_define_gmrfb { struct vmw_screen_object_unit { struct vmw_display_unit base; - unsigned long buffer_size; /**< Size of allocated buffer */ struct vmw_bo *buffer; /**< Backing store buffer */ bool defined; @@ -240,7 +241,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) int x, y; sou->buffer = vps->bo; - sou->buffer_size = vps->bo_size; conn_state = sou->base.connector.state; vmw_conn_state = vmw_connector_state_to_vcs(conn_state); @@ -255,7 +255,6 @@ static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc) } else { sou->buffer = NULL; - sou->buffer_size = 0; } } @@ -271,19 +270,6 @@ static void vmw_sou_crtc_helper_prepare(struct drm_crtc *crtc) } /** - * vmw_sou_crtc_atomic_enable - Noop - * - * @crtc: CRTC associated with the new screen - * @state: Unused - * - * This is called after a mode set has been completed. 
- */ -static void vmw_sou_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ -} - -/** * vmw_sou_crtc_atomic_disable - Turns off CRTC * * @crtc: CRTC to be turned off @@ -305,6 +291,9 @@ static void vmw_sou_crtc_atomic_disable(struct drm_crtc *crtc, sou = vmw_crtc_to_sou(crtc); dev_priv = vmw_priv(crtc->dev); + if (dev_priv->vkms_enabled) + drm_crtc_vblank_off(crtc); + if (sou->defined) { ret = vmw_sou_fifo_destroy(dev_priv, sou); if (ret) @@ -320,6 +309,9 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = { .atomic_destroy_state = vmw_du_crtc_destroy_state, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, + .enable_vblank = vmw_vkms_enable_vblank, + .disable_vblank = vmw_vkms_disable_vblank, + .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp, }; /* @@ -797,8 +789,8 @@ static const struct drm_crtc_helper_funcs vmw_sou_crtc_helper_funcs = { .mode_set_nofb = vmw_sou_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, - .atomic_flush = vmw_du_crtc_atomic_flush, - .atomic_enable = vmw_sou_crtc_atomic_enable, + .atomic_flush = vmw_vkms_crtc_atomic_flush, + .atomic_enable = vmw_vkms_crtc_atomic_enable, .atomic_disable = vmw_sou_crtc_atomic_disable, }; @@ -908,6 +900,9 @@ static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit) dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.suggested_y_property, 0); + + vmw_du_init(&sou->base); + return 0; err_free_unregister: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c index dbc44ecbd1..a04e073631 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c @@ -27,12 +27,14 @@ #include "vmwgfx_bo.h" #include "vmwgfx_kms.h" +#include "vmwgfx_vkms.h" #include "vmw_surface_cache.h" #include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_damage_helper.h> #include <drm/drm_fourcc.h> +#include <drm/drm_vblank.h> #define vmw_crtc_to_stdu(x) \ container_of(x, struct vmw_screen_target_display_unit, base.crtc) @@ -90,11 +92,6 @@ struct vmw_stdu_update { SVGA3dCmdUpdateGBScreenTarget body; }; -struct vmw_stdu_dma { - SVGA3dCmdHeader header; - SVGA3dCmdSurfaceDMA body; -}; - struct vmw_stdu_surface_copy { SVGA3dCmdHeader header; SVGA3dCmdSurfaceCopy body; @@ -414,24 +411,14 @@ static void vmw_stdu_crtc_mode_set_nofb(struct drm_crtc *crtc) crtc->x, crtc->y); } - -static void vmw_stdu_crtc_helper_prepare(struct drm_crtc *crtc) -{ -} - -static void vmw_stdu_crtc_atomic_enable(struct drm_crtc *crtc, - struct drm_atomic_state *state) -{ -} - static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, struct drm_atomic_state *state) { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; + struct drm_crtc_state *new_crtc_state; int ret; - if (!crtc) { DRM_ERROR("CRTC is NULL\n"); return; @@ -439,6 +426,10 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, stdu = vmw_crtc_to_stdu(crtc); dev_priv = vmw_priv(crtc->dev); + new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + + if (dev_priv->vkms_enabled) + drm_crtc_vblank_off(crtc); if (stdu->defined) { ret = vmw_stdu_bind_st(dev_priv, stdu, NULL); @@ -447,6 +438,14 @@ static void vmw_stdu_crtc_atomic_disable(struct drm_crtc *crtc, (void) vmw_stdu_update_st(dev_priv, stdu); + /* Don't destroy the Screen Target if we are only setting the + * display as inactive + */ + if 
(new_crtc_state->enable && + !new_crtc_state->active && + !new_crtc_state->mode_changed) + return; + ret = vmw_stdu_destroy_st(dev_priv, stdu); if (ret) DRM_ERROR("Failed to destroy Screen Target\n"); @@ -777,7 +776,6 @@ out_unref: return ret; } - /* * Screen Target CRTC dispatch table */ @@ -789,6 +787,12 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = { .atomic_destroy_state = vmw_du_crtc_destroy_state, .set_config = drm_atomic_helper_set_config, .page_flip = drm_atomic_helper_page_flip, + .enable_vblank = vmw_vkms_enable_vblank, + .disable_vblank = vmw_vkms_disable_vblank, + .get_vblank_timestamp = vmw_vkms_get_vblank_timestamp, + .get_crc_sources = vmw_vkms_get_crc_sources, + .set_crc_source = vmw_vkms_set_crc_source, + .verify_crc_source = vmw_vkms_verify_crc_source, }; @@ -1454,6 +1458,17 @@ vmw_stdu_primary_plane_atomic_update(struct drm_plane *plane, vmw_fence_obj_unreference(&fence); } +static void +vmw_stdu_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct vmw_private *vmw = vmw_priv(crtc->dev); + struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc); + + if (vmw->vkms_enabled) + vmw_vkms_set_crc_surface(crtc, stdu->display_srf); + vmw_vkms_crtc_atomic_flush(crtc, state); +} static const struct drm_plane_funcs vmw_stdu_plane_funcs = { .update_plane = drm_atomic_helper_update_plane, @@ -1494,12 +1509,11 @@ drm_plane_helper_funcs vmw_stdu_primary_plane_helper_funcs = { }; static const struct drm_crtc_helper_funcs vmw_stdu_crtc_helper_funcs = { - .prepare = vmw_stdu_crtc_helper_prepare, .mode_set_nofb = vmw_stdu_crtc_mode_set_nofb, .atomic_check = vmw_du_crtc_atomic_check, .atomic_begin = vmw_du_crtc_atomic_begin, - .atomic_flush = vmw_du_crtc_atomic_flush, - .atomic_enable = vmw_stdu_crtc_atomic_enable, + .atomic_flush = vmw_stdu_crtc_atomic_flush, + .atomic_enable = vmw_vkms_crtc_atomic_enable, .atomic_disable = vmw_stdu_crtc_atomic_disable, }; @@ -1616,6 +1630,9 @@ static int vmw_stdu_init(struct vmw_private *dev_priv, unsigned unit) dev->mode_config.suggested_x_property, 0); drm_object_attach_property(&connector->base, dev->mode_config.suggested_y_property, 0); + + vmw_du_init(&stdu->base); + return 0; err_free_unregister: diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c deleted file mode 100644 index 90097d04b4..0000000000 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_glue.c +++ /dev/null @@ -1,110 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR MIT -/************************************************************************** - * - * Copyright 2009-2011 VMware, Inc., Palo Alto, CA., USA - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - **************************************************************************/ - -#include "vmwgfx_drv.h" - -static int vmw_bo_vm_lookup(struct ttm_device *bdev, - struct drm_file *filp, - unsigned long offset, - unsigned long pages, - struct ttm_buffer_object **p_bo) -{ - struct vmw_private *dev_priv = container_of(bdev, struct vmw_private, bdev); - struct drm_device *drm = &dev_priv->drm; - struct drm_vma_offset_node *node; - int ret; - - *p_bo = NULL; - - drm_vma_offset_lock_lookup(bdev->vma_manager); - - node = drm_vma_offset_lookup_locked(bdev->vma_manager, offset, pages); - if (likely(node)) { - *p_bo = container_of(node, struct ttm_buffer_object, - base.vma_node); - *p_bo = ttm_bo_get_unless_zero(*p_bo); - } - - drm_vma_offset_unlock_lookup(bdev->vma_manager); - - if (!*p_bo) { - drm_err(drm, "Could not find buffer object to map\n"); - return -EINVAL; - } - - if (!drm_vma_node_is_allowed(node, filp)) { - ret = -EACCES; - goto out_no_access; - } - - return 0; -out_no_access: - ttm_bo_put(*p_bo); - return ret; -} - -int vmw_mmap(struct file *filp, struct vm_area_struct *vma) -{ - static const struct vm_operations_struct vmw_vm_ops = { - .pfn_mkwrite = vmw_bo_vm_mkwrite, - .page_mkwrite = vmw_bo_vm_mkwrite, - .fault = vmw_bo_vm_fault, - .open = ttm_bo_vm_open, - .close = ttm_bo_vm_close, - }; - struct drm_file *file_priv = filp->private_data; - struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev); - struct ttm_device *bdev = &dev_priv->bdev; - struct ttm_buffer_object *bo; - int ret; - - if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET_START)) - return -EINVAL; - - ret = vmw_bo_vm_lookup(bdev, file_priv, vma->vm_pgoff, vma_pages(vma), &bo); - if (unlikely(ret != 0)) - return ret; - - ret = ttm_bo_mmap_obj(vma, bo); - if (unlikely(ret != 0)) - goto out_unref; - - vma->vm_ops = &vmw_vm_ops; - - /* Use VM_PFNMAP rather than VM_MIXEDMAP if not a COW mapping */ - if (!is_cow_mapping(vma->vm_flags)) - vm_flags_mod(vma, VM_PFNMAP, VM_MIXEDMAP); - - ttm_bo_put(bo); /* release extra ref taken by ttm_bo_mmap_obj() */ - - return 0; - -out_unref: - ttm_bo_put(bo); - return ret; -} - diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c index aaacbdcbd7..e7625b3f71 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c @@ -32,9 +32,6 @@ #include <linux/slab.h> - -#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE) - /** * struct vmw_validation_bo_node - Buffer object validation metadata. * @base: Metadata used for TTM reservation- and validation. 
@@ -112,20 +109,10 @@ void *vmw_validation_mem_alloc(struct vmw_validation_context *ctx, return NULL; if (ctx->mem_size_left < size) { - struct page *page; - - if (ctx->vm && ctx->vm_size_left < PAGE_SIZE) { - ctx->vm_size_left += VMWGFX_VALIDATION_MEM_GRAN; - ctx->total_mem += VMWGFX_VALIDATION_MEM_GRAN; - } - - page = alloc_page(GFP_KERNEL | __GFP_ZERO); + struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (!page) return NULL; - if (ctx->vm) - ctx->vm_size_left -= PAGE_SIZE; - list_add_tail(&page->lru, &ctx->page_list); ctx->page_address = page_address(page); ctx->mem_size_left = PAGE_SIZE; @@ -155,10 +142,6 @@ static void vmw_validation_mem_free(struct vmw_validation_context *ctx) } ctx->mem_size_left = 0; - if (ctx->vm && ctx->total_mem) { - ctx->total_mem = 0; - ctx->vm_size_left = 0; - } } /** diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h index 240ee0c4eb..353d837907 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.h @@ -52,10 +52,6 @@ * buffer objects * @mem_size_left: Free memory left in the last page in @page_list * @page_address: Kernel virtual address of the last page in @page_list - * @vm: A pointer to the memory reservation interface or NULL if no - * memory reservation is needed. - * @vm_size_left: Amount of reserved memory that so far has not been allocated. - * @total_mem: Amount of reserved memory. */ struct vmw_validation_context { struct vmw_sw_context *sw_context; @@ -68,9 +64,6 @@ struct vmw_validation_context { unsigned int merge_dups; unsigned int mem_size_left; u8 *page_address; - struct vmw_validation_mem *vm; - size_t vm_size_left; - size_t total_mem; }; struct vmw_bo; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c new file mode 100644 index 0000000000..7e93a45948 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/************************************************************************** + * + * Copyright (c) 2024 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#include "vmwgfx_vkms.h" + +#include "vmwgfx_bo.h" +#include "vmwgfx_drv.h" +#include "vmwgfx_kms.h" +#include "vmwgfx_vkms.h" + +#include "vmw_surface_cache.h" + +#include <drm/drm_crtc.h> +#include <drm/drm_debugfs_crc.h> +#include <drm/drm_print.h> +#include <drm/drm_vblank.h> + +#include <linux/crc32.h> +#include <linux/delay.h> + +#define GUESTINFO_VBLANK "guestinfo.vmwgfx.vkms_enable" + +static int +vmw_surface_sync(struct vmw_private *vmw, + struct vmw_surface *surf) +{ + int ret; + struct vmw_fence_obj *fence = NULL; + struct vmw_bo *bo = surf->res.guest_memory_bo; + + vmw_resource_clean(&surf->res); + + ret = ttm_bo_reserve(&bo->tbo, false, false, NULL); + if (ret != 0) { + drm_warn(&vmw->drm, "%s: failed reserve\n", __func__); + goto done; + } + + ret = vmw_execbuf_fence_commands(NULL, vmw, &fence, NULL); + if (ret != 0) { + drm_warn(&vmw->drm, "%s: failed execbuf\n", __func__); + ttm_bo_unreserve(&bo->tbo); + goto done; + } + + dma_fence_wait(&fence->base, false); + dma_fence_put(&fence->base); + + ttm_bo_unreserve(&bo->tbo); +done: + return ret; +} + +static int +compute_crc(struct drm_crtc *crtc, + struct vmw_surface *surf, + u32 *crc) +{ + u8 *mapped_surface; + struct vmw_bo *bo = surf->res.guest_memory_bo; + const struct SVGA3dSurfaceDesc *desc = + vmw_surface_get_desc(surf->metadata.format); + u32 row_pitch_bytes; + SVGA3dSize blocks; + u32 y; + + *crc = 0; + + vmw_surface_get_size_in_blocks(desc, &surf->metadata.base_size, &blocks); + row_pitch_bytes = blocks.width * desc->pitchBytesPerBlock; + WARN_ON(!bo); + mapped_surface = vmw_bo_map_and_cache(bo); + + for (y = 0; y < blocks.height; y++) { + *crc = crc32_le(*crc, mapped_surface, row_pitch_bytes); + mapped_surface += row_pitch_bytes; + } + + vmw_bo_unmap(bo); + + return 0; +} + +static void +crc_generate_worker(struct work_struct *work) +{ + struct vmw_display_unit *du = + container_of(work, struct vmw_display_unit, vkms.crc_generator_work); + struct drm_crtc *crtc = &du->crtc; + struct vmw_private *vmw = vmw_priv(crtc->dev); + bool crc_pending; + u64 frame_start, frame_end; + u32 crc32 = 0; + struct vmw_surface *surf = 0; + int ret; + + spin_lock_irq(&du->vkms.crc_state_lock); + crc_pending = du->vkms.crc_pending; + spin_unlock_irq(&du->vkms.crc_state_lock); + + /* + * We raced with the vblank hrtimer and previous work already computed + * the crc, nothing to do. + */ + if (!crc_pending) + return; + + spin_lock_irq(&du->vkms.crc_state_lock); + surf = du->vkms.surface; + spin_unlock_irq(&du->vkms.crc_state_lock); + + if (vmw_surface_sync(vmw, surf)) { + drm_warn(crtc->dev, "CRC worker wasn't able to sync the crc surface!\n"); + return; + } + + ret = compute_crc(crtc, surf, &crc32); + if (ret) + return; + + spin_lock_irq(&du->vkms.crc_state_lock); + frame_start = du->vkms.frame_start; + frame_end = du->vkms.frame_end; + crc_pending = du->vkms.crc_pending; + du->vkms.frame_start = 0; + du->vkms.frame_end = 0; + du->vkms.crc_pending = false; + spin_unlock_irq(&du->vkms.crc_state_lock); + + /* + * The worker can fall behind the vblank hrtimer, make sure we catch up. 
+ */ + while (frame_start <= frame_end) + drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32); +} + +static enum hrtimer_restart +vmw_vkms_vblank_simulate(struct hrtimer *timer) +{ + struct vmw_display_unit *du = container_of(timer, struct vmw_display_unit, vkms.timer); + struct drm_crtc *crtc = &du->crtc; + struct vmw_private *vmw = vmw_priv(crtc->dev); + struct vmw_surface *surf = NULL; + u64 ret_overrun; + bool locked, ret; + + ret_overrun = hrtimer_forward_now(&du->vkms.timer, + du->vkms.period_ns); + if (ret_overrun != 1) + drm_dbg_driver(crtc->dev, "vblank timer missed %lld frames.\n", + ret_overrun - 1); + + locked = vmw_vkms_vblank_trylock(crtc); + ret = drm_crtc_handle_vblank(crtc); + WARN_ON(!ret); + if (!locked) + return HRTIMER_RESTART; + surf = du->vkms.surface; + vmw_vkms_unlock(crtc); + + if (du->vkms.crc_enabled && surf) { + u64 frame = drm_crtc_accurate_vblank_count(crtc); + + spin_lock(&du->vkms.crc_state_lock); + if (!du->vkms.crc_pending) + du->vkms.frame_start = frame; + else + drm_dbg_driver(crtc->dev, + "crc worker falling behind, frame_start: %llu, frame_end: %llu\n", + du->vkms.frame_start, frame); + du->vkms.frame_end = frame; + du->vkms.crc_pending = true; + spin_unlock(&du->vkms.crc_state_lock); + + ret = queue_work(vmw->crc_workq, &du->vkms.crc_generator_work); + if (!ret) + drm_dbg_driver(crtc->dev, "Composer worker already queued\n"); + } + + return HRTIMER_RESTART; +} + +void +vmw_vkms_init(struct vmw_private *vmw) +{ + char buffer[64]; + const size_t max_buf_len = sizeof(buffer) - 1; + size_t buf_len = max_buf_len; + int ret; + + vmw->vkms_enabled = false; + + ret = vmw_host_get_guestinfo(GUESTINFO_VBLANK, buffer, &buf_len); + if (ret || buf_len > max_buf_len) + return; + buffer[buf_len] = '\0'; + + ret = kstrtobool(buffer, &vmw->vkms_enabled); + if (!ret && vmw->vkms_enabled) { + ret = drm_vblank_init(&vmw->drm, VMWGFX_NUM_DISPLAY_UNITS); + vmw->vkms_enabled = (ret == 0); + } + + vmw->crc_workq = alloc_ordered_workqueue("vmwgfx_crc_generator", 0); + if (!vmw->crc_workq) { + drm_warn(&vmw->drm, "crc workqueue allocation failed. Disabling vkms."); + vmw->vkms_enabled = false; + } + if (vmw->vkms_enabled) + drm_info(&vmw->drm, "VKMS enabled\n"); +} + +void +vmw_vkms_cleanup(struct vmw_private *vmw) +{ + destroy_workqueue(vmw->crc_workq); +} + +bool +vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq) +{ + struct drm_device *dev = crtc->dev; + struct vmw_private *vmw = vmw_priv(dev); + unsigned int pipe = crtc->index; + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + + if (!vmw->vkms_enabled) + return false; + + if (!READ_ONCE(vblank->enabled)) { + *vblank_time = ktime_get(); + return true; + } + + *vblank_time = READ_ONCE(du->vkms.timer.node.expires); + + if (WARN_ON(*vblank_time == vblank->time)) + return true; + + /* + * To prevent races we roll the hrtimer forward before we do any + * interrupt processing - this is how real hw works (the interrupt is + * only generated after all the vblank registers are updated) and what + * the vblank core expects. Therefore we need to always correct the + * timestampe by one frame. 
+ */ + *vblank_time -= du->vkms.period_ns; + + return true; +} + +int +vmw_vkms_enable_vblank(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct vmw_private *vmw = vmw_priv(dev); + unsigned int pipe = drm_crtc_index(crtc); + struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + + if (!vmw->vkms_enabled) + return -EINVAL; + + drm_calc_timestamping_constants(crtc, &crtc->mode); + + hrtimer_init(&du->vkms.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + du->vkms.timer.function = &vmw_vkms_vblank_simulate; + du->vkms.period_ns = ktime_set(0, vblank->framedur_ns); + hrtimer_start(&du->vkms.timer, du->vkms.period_ns, HRTIMER_MODE_REL); + + return 0; +} + +void +vmw_vkms_disable_vblank(struct drm_crtc *crtc) +{ + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + struct vmw_private *vmw = vmw_priv(crtc->dev); + + if (!vmw->vkms_enabled) + return; + + hrtimer_cancel(&du->vkms.timer); + du->vkms.surface = NULL; + du->vkms.period_ns = ktime_set(0, 0); +} + +enum vmw_vkms_lock_state { + VMW_VKMS_LOCK_UNLOCKED = 0, + VMW_VKMS_LOCK_MODESET = 1, + VMW_VKMS_LOCK_VBLANK = 2 +}; + +void +vmw_vkms_crtc_init(struct drm_crtc *crtc) +{ + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + + atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED); + spin_lock_init(&du->vkms.crc_state_lock); + + INIT_WORK(&du->vkms.crc_generator_work, crc_generate_worker); + du->vkms.surface = NULL; +} + +void +vmw_vkms_crtc_cleanup(struct drm_crtc *crtc) +{ + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + + WARN_ON(work_pending(&du->vkms.crc_generator_work)); + hrtimer_cancel(&du->vkms.timer); +} + +void +vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct vmw_private *vmw = vmw_priv(crtc->dev); + + if (vmw->vkms_enabled) + vmw_vkms_modeset_lock(crtc); +} + +void +vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + unsigned long flags; + struct vmw_private *vmw = vmw_priv(crtc->dev); + + if (!vmw->vkms_enabled) + return; + + if (crtc->state->event) { + spin_lock_irqsave(&crtc->dev->event_lock, flags); + + if (drm_crtc_vblank_get(crtc) != 0) + drm_crtc_send_vblank_event(crtc, crtc->state->event); + else + drm_crtc_arm_vblank_event(crtc, crtc->state->event); + + spin_unlock_irqrestore(&crtc->dev->event_lock, flags); + + crtc->state->event = NULL; + } + + vmw_vkms_unlock(crtc); +} + +void +vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct vmw_private *vmw = vmw_priv(crtc->dev); + + if (vmw->vkms_enabled) + drm_crtc_vblank_on(crtc); +} + +void +vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct vmw_private *vmw = vmw_priv(crtc->dev); + + if (vmw->vkms_enabled) + drm_crtc_vblank_off(crtc); +} + +static bool +is_crc_supported(struct drm_crtc *crtc) +{ + struct vmw_private *vmw = vmw_priv(crtc->dev); + + if (!vmw->vkms_enabled) + return false; + + if (vmw->active_display_unit != vmw_du_screen_target) + return false; + + return true; +} + +static const char * const pipe_crc_sources[] = {"auto"}; + +static int +crc_parse_source(const char *src_name, + bool *enabled) +{ + int ret = 0; + + if (!src_name) { + *enabled = false; + } else if (strcmp(src_name, "auto") == 0) { + *enabled = true; + } else { + *enabled = false; + ret = -EINVAL; + } + + return ret; +} + +const char *const * +vmw_vkms_get_crc_sources(struct drm_crtc *crtc, + size_t *count) +{ + *count = 0; + if 
(!is_crc_supported(crtc)) + return NULL; + + *count = ARRAY_SIZE(pipe_crc_sources); + return pipe_crc_sources; +} + +int +vmw_vkms_verify_crc_source(struct drm_crtc *crtc, + const char *src_name, + size_t *values_cnt) +{ + bool enabled; + + if (!is_crc_supported(crtc)) + return -EINVAL; + + if (crc_parse_source(src_name, &enabled) < 0) { + drm_dbg_driver(crtc->dev, "unknown source '%s'\n", src_name); + return -EINVAL; + } + + *values_cnt = 1; + + return 0; +} + +int +vmw_vkms_set_crc_source(struct drm_crtc *crtc, + const char *src_name) +{ + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + bool enabled, prev_enabled, locked; + int ret; + + if (!is_crc_supported(crtc)) + return -EINVAL; + + ret = crc_parse_source(src_name, &enabled); + + if (enabled) + drm_crtc_vblank_get(crtc); + + locked = vmw_vkms_modeset_lock_relaxed(crtc); + prev_enabled = du->vkms.crc_enabled; + du->vkms.crc_enabled = enabled; + if (locked) + vmw_vkms_unlock(crtc); + + if (prev_enabled) + drm_crtc_vblank_put(crtc); + + return ret; +} + +void +vmw_vkms_set_crc_surface(struct drm_crtc *crtc, + struct vmw_surface *surf) +{ + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + struct vmw_private *vmw = vmw_priv(crtc->dev); + + if (vmw->vkms_enabled) { + WARN_ON(atomic_read(&du->vkms.atomic_lock) != VMW_VKMS_LOCK_MODESET); + du->vkms.surface = surf; + } +} + +/** + * vmw_vkms_lock_max_wait_ns - Return the max wait for the vkms lock + * @du: The vmw_display_unit from which to grab the vblank timings + * + * Returns the maximum wait time used to acquire the vkms lock. By + * default uses a time of a single frame and in case where vblank + * was not initialized for the display unit 1/60th of a second. + */ +static inline u64 +vmw_vkms_lock_max_wait_ns(struct vmw_display_unit *du) +{ + s64 nsecs = ktime_to_ns(du->vkms.period_ns); + + return (nsecs > 0) ? nsecs : 16666666; +} + +/** + * vmw_vkms_modeset_lock - Protects access to crtc during modeset + * @crtc: The crtc to lock for vkms + * + * This function prevents the VKMS timers/callbacks from being called + * while a modeset operation is in process. We don't want the callbacks + * e.g. the vblank simulator to be trying to access incomplete state + * so we need to make sure they execute only when the modeset has + * finished. + * + * Normally this would have been done with a spinlock but locking the + * entire atomic modeset with vmwgfx is impossible because kms prepare + * executes non-atomic ops (e.g. vmw_validation_prepare holds a mutex to + * guard various bits of state). Which means that we need to synchronize + * atomic context (the vblank handler) with the non-atomic entirity + * of kms - so use an atomic_t to track which part of vkms has access + * to the basic vkms state. + */ +void +vmw_vkms_modeset_lock(struct drm_crtc *crtc) +{ + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + const u64 nsecs_delay = 10; + const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du); + u64 total_delay = 0; + int ret; + + do { + ret = atomic_cmpxchg(&du->vkms.atomic_lock, + VMW_VKMS_LOCK_UNLOCKED, + VMW_VKMS_LOCK_MODESET); + if (ret == VMW_VKMS_LOCK_UNLOCKED || total_delay >= MAX_NSECS_DELAY) + break; + ndelay(nsecs_delay); + total_delay += nsecs_delay; + } while (1); + + if (total_delay >= MAX_NSECS_DELAY) { + drm_warn(crtc->dev, "VKMS lock expired! 
total_delay = %lld, ret = %d, cur = %d\n", + total_delay, ret, atomic_read(&du->vkms.atomic_lock)); + } +} + +/** + * vmw_vkms_modeset_lock_relaxed - Protects access to crtc during modeset + * @crtc: The crtc to lock for vkms + * + * Much like vmw_vkms_modeset_lock except that when the crtc is currently + * in a modeset it will return immediately. + * + * Returns true if actually locked vkms to modeset or false otherwise. + */ +bool +vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc) +{ + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + const u64 nsecs_delay = 10; + const u64 MAX_NSECS_DELAY = vmw_vkms_lock_max_wait_ns(du); + u64 total_delay = 0; + int ret; + + do { + ret = atomic_cmpxchg(&du->vkms.atomic_lock, + VMW_VKMS_LOCK_UNLOCKED, + VMW_VKMS_LOCK_MODESET); + if (ret == VMW_VKMS_LOCK_UNLOCKED || + ret == VMW_VKMS_LOCK_MODESET || + total_delay >= MAX_NSECS_DELAY) + break; + ndelay(nsecs_delay); + total_delay += nsecs_delay; + } while (1); + + if (total_delay >= MAX_NSECS_DELAY) { + drm_warn(crtc->dev, "VKMS relaxed lock expired!\n"); + return false; + } + + return ret == VMW_VKMS_LOCK_UNLOCKED; +} + +/** + * vmw_vkms_vblank_trylock - Protects access to crtc during vblank + * @crtc: The crtc to lock for vkms + * + * Tries to lock vkms for vblank, returns immediately. + * + * Returns true if locked vkms to vblank or false otherwise. + */ +bool +vmw_vkms_vblank_trylock(struct drm_crtc *crtc) +{ + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + u32 ret; + + ret = atomic_cmpxchg(&du->vkms.atomic_lock, + VMW_VKMS_LOCK_UNLOCKED, + VMW_VKMS_LOCK_VBLANK); + + return ret == VMW_VKMS_LOCK_UNLOCKED; +} + +void +vmw_vkms_unlock(struct drm_crtc *crtc) +{ + struct vmw_display_unit *du = vmw_crtc_to_du(crtc); + + /* Release flag; mark it as unlocked. */ + atomic_set(&du->vkms.atomic_lock, VMW_VKMS_LOCK_UNLOCKED); +} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h new file mode 100644 index 0000000000..69ddd33a84 --- /dev/null +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_vkms.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +/************************************************************************** + * + * Copyright (c) 2024 Broadcom. All Rights Reserved. The term + * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + **************************************************************************/ + +#ifndef VMWGFX_VKMS_H_ +#define VMWGFX_VKMS_H_ + +#include <linux/hrtimer_types.h> +#include <linux/types.h> + +struct drm_atomic_state; +struct drm_crtc; +struct vmw_private; +struct vmw_surface; + +void vmw_vkms_init(struct vmw_private *vmw); +void vmw_vkms_cleanup(struct vmw_private *vmw); + +void vmw_vkms_modeset_lock(struct drm_crtc *crtc); +bool vmw_vkms_modeset_lock_relaxed(struct drm_crtc *crtc); +bool vmw_vkms_vblank_trylock(struct drm_crtc *crtc); +void vmw_vkms_unlock(struct drm_crtc *crtc); + +bool vmw_vkms_get_vblank_timestamp(struct drm_crtc *crtc, + int *max_error, + ktime_t *vblank_time, + bool in_vblank_irq); +int vmw_vkms_enable_vblank(struct drm_crtc *crtc); +void vmw_vkms_disable_vblank(struct drm_crtc *crtc); + +void vmw_vkms_crtc_init(struct drm_crtc *crtc); +void vmw_vkms_crtc_cleanup(struct drm_crtc *crtc); +void vmw_vkms_crtc_atomic_begin(struct drm_crtc *crtc, + struct drm_atomic_state *state); +void vmw_vkms_crtc_atomic_flush(struct drm_crtc *crtc, struct drm_atomic_state *state); +void vmw_vkms_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state); +void vmw_vkms_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state); + +const char *const *vmw_vkms_get_crc_sources(struct drm_crtc *crtc, + size_t *count); +int vmw_vkms_verify_crc_source(struct drm_crtc *crtc, + const char *src_name, + size_t *values_cnt); +int vmw_vkms_set_crc_source(struct drm_crtc *crtc, + const char *src_name); +void vmw_vkms_set_crc_surface(struct drm_crtc *crtc, + struct vmw_surface *surf); + +#endif |
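
Note on using the new CRC hooks: the series wires the Screen Target CRTC into DRM's generic debugfs CRC interface (.get_crc_sources, .set_crc_source and .verify_crc_source in vmw_stdu_crtc_funcs), advertising a single "auto" source, and only when vkms is enabled through the "guestinfo.vmwgfx.vkms_enable" key on a Screen Target display unit. The sketch below is not part of the patch; it only illustrates how a userspace test might consume those CRCs through the standard debugfs files. The debugfs mount point, DRM minor number (0) and CRTC index (crtc-0) are assumptions for illustration.

    /* crc_read.c - minimal consumer of the DRM debugfs CRC interface. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <unistd.h>

    int main(void)
    {
    	const char *ctl = "/sys/kernel/debug/dri/0/crtc-0/crc/control";
    	const char *dat = "/sys/kernel/debug/dri/0/crtc-0/crc/data";
    	char line[128];
    	ssize_t n;
    	int fd;

    	/* Select the CRC source; this series only advertises "auto". */
    	fd = open(ctl, O_WRONLY);
    	if (fd < 0 || write(fd, "auto", 4) < 0) {
    		perror("crc control");
    		return 1;
    	}
    	close(fd);

    	/* Each read blocks for the next entry: a frame number followed
    	 * by the CRC value(s) in hex. Loop until interrupted. */
    	fd = open(dat, O_RDONLY);
    	if (fd < 0) {
    		perror("crc data");
    		return 1;
    	}
    	while ((n = read(fd, line, sizeof(line) - 1)) > 0) {
    		line[n] = '\0';
    		fputs(line, stdout);
    	}
    	close(fd);
    	return 0;
    }

Opening the data file takes a vblank reference and starts the hrtimer-driven vblank simulation added in vmwgfx_vkms.c; the CRC worker then computes crc32 over the scanout surface rows and feeds entries back through drm_crtc_add_crc_entry(), which is what the read loop above drains.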