From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- drivers/gpu/drm/qxl/Kconfig | 12 + drivers/gpu/drm/qxl/Makefile | 8 + drivers/gpu/drm/qxl/qxl_cmd.c | 651 +++++++++++++++++++ drivers/gpu/drm/qxl/qxl_debugfs.c | 126 ++++ drivers/gpu/drm/qxl/qxl_dev.h | 876 +++++++++++++++++++++++++ drivers/gpu/drm/qxl/qxl_display.c | 1291 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/qxl/qxl_draw.c | 268 ++++++++ drivers/gpu/drm/qxl/qxl_drv.c | 312 +++++++++ drivers/gpu/drm/qxl/qxl_drv.h | 463 +++++++++++++ drivers/gpu/drm/qxl/qxl_dumb.c | 76 +++ drivers/gpu/drm/qxl/qxl_gem.c | 131 ++++ drivers/gpu/drm/qxl/qxl_image.c | 240 +++++++ drivers/gpu/drm/qxl/qxl_ioctl.c | 415 ++++++++++++ drivers/gpu/drm/qxl/qxl_irq.c | 106 +++ drivers/gpu/drm/qxl/qxl_kms.c | 322 +++++++++ drivers/gpu/drm/qxl/qxl_object.c | 414 ++++++++++++ drivers/gpu/drm/qxl/qxl_object.h | 75 +++ drivers/gpu/drm/qxl/qxl_prime.c | 75 +++ drivers/gpu/drm/qxl/qxl_release.c | 439 +++++++++++++ drivers/gpu/drm/qxl/qxl_ttm.c | 235 +++++++ 20 files changed, 6535 insertions(+) create mode 100644 drivers/gpu/drm/qxl/Kconfig create mode 100644 drivers/gpu/drm/qxl/Makefile create mode 100644 drivers/gpu/drm/qxl/qxl_cmd.c create mode 100644 drivers/gpu/drm/qxl/qxl_debugfs.c create mode 100644 drivers/gpu/drm/qxl/qxl_dev.h create mode 100644 drivers/gpu/drm/qxl/qxl_display.c create mode 100644 drivers/gpu/drm/qxl/qxl_draw.c create mode 100644 drivers/gpu/drm/qxl/qxl_drv.c create mode 100644 drivers/gpu/drm/qxl/qxl_drv.h create mode 100644 drivers/gpu/drm/qxl/qxl_dumb.c create mode 100644 drivers/gpu/drm/qxl/qxl_gem.c create mode 100644 drivers/gpu/drm/qxl/qxl_image.c create mode 100644 drivers/gpu/drm/qxl/qxl_ioctl.c create mode 100644 drivers/gpu/drm/qxl/qxl_irq.c create mode 100644 drivers/gpu/drm/qxl/qxl_kms.c create mode 100644 drivers/gpu/drm/qxl/qxl_object.c create mode 100644 drivers/gpu/drm/qxl/qxl_object.h create mode 100644 drivers/gpu/drm/qxl/qxl_prime.c create mode 100644 drivers/gpu/drm/qxl/qxl_release.c create mode 100644 drivers/gpu/drm/qxl/qxl_ttm.c (limited to 'drivers/gpu/drm/qxl') diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig new file mode 100644 index 000000000..ca3f51c2a --- /dev/null +++ b/drivers/gpu/drm/qxl/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_QXL + tristate "QXL virtual GPU" + depends on DRM && PCI && MMU + select DRM_KMS_HELPER + select DRM_TTM + select DRM_TTM_HELPER + select CRC32 + help + QXL virtual GPU for Spice virtualization desktop integration. + Do not enable this driver unless your distro ships a corresponding + X.org QXL driver that can handle kernel modesetting. diff --git a/drivers/gpu/drm/qxl/Makefile b/drivers/gpu/drm/qxl/Makefile new file mode 100644 index 000000000..1b6c20121 --- /dev/null +++ b/drivers/gpu/drm/qxl/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. 
+ +qxl-y := qxl_drv.o qxl_kms.o qxl_display.o qxl_ttm.o qxl_object.o qxl_gem.o qxl_cmd.o qxl_image.o qxl_draw.o qxl_debugfs.o qxl_irq.o qxl_dumb.o qxl_ioctl.o qxl_release.o qxl_prime.o + +obj-$(CONFIG_DRM_QXL)+= qxl.o diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c new file mode 100644 index 000000000..63aa96a69 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -0,0 +1,651 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alon Levy + */ + +/* QXL cmd/ring handling */ + +#include + +#include + +#include "qxl_drv.h" +#include "qxl_object.h" + +static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap); + +struct ring { + struct qxl_ring_header header; + uint8_t elements[]; +}; + +struct qxl_ring { + struct ring *ring; + int element_size; + int n_elements; + int prod_notify; + wait_queue_head_t *push_event; + spinlock_t lock; +}; + +void qxl_ring_free(struct qxl_ring *ring) +{ + kfree(ring); +} + +struct qxl_ring * +qxl_ring_create(struct qxl_ring_header *header, + int element_size, + int n_elements, + int prod_notify, + wait_queue_head_t *push_event) +{ + struct qxl_ring *ring; + + ring = kmalloc(sizeof(*ring), GFP_KERNEL); + if (!ring) + return NULL; + + ring->ring = (struct ring *)header; + ring->element_size = element_size; + ring->n_elements = n_elements; + ring->prod_notify = prod_notify; + ring->push_event = push_event; + spin_lock_init(&ring->lock); + return ring; +} + +static int qxl_check_header(struct qxl_ring *ring) +{ + int ret; + struct qxl_ring_header *header = &(ring->ring->header); + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + ret = header->prod - header->cons < header->num_items; + if (ret == 0) + header->notify_on_cons = header->cons + 1; + spin_unlock_irqrestore(&ring->lock, flags); + return ret; +} + +int qxl_check_idle(struct qxl_ring *ring) +{ + int ret; + struct qxl_ring_header *header = &(ring->ring->header); + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + ret = header->prod == header->cons; + spin_unlock_irqrestore(&ring->lock, flags); + return ret; +} + +int qxl_ring_push(struct qxl_ring *ring, + const void *new_elt, bool interruptible) +{ + struct qxl_ring_header *header = &(ring->ring->header); + uint8_t *elt; + int idx, ret; + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + if (header->prod - header->cons == header->num_items) { + header->notify_on_cons = 
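/*
 * Ring-full handshake: prod and cons are free-running 32-bit
 * counters, so the ring is full exactly when prod - cons ==
 * num_items and empty when prod == cons.  Setting notify_on_cons =
 * cons + 1 asks the device to interrupt once the consumer advances,
 * and the IRQ handler wakes *ring->push_event.  A minimal model of
 * the lock-free part of the producer side, assuming a power-of-two
 * ring size (ring_try_push is an illustrative name, not a driver
 * function):
 *
 *   static bool ring_try_push(struct qxl_ring_header *h, uint8_t *elts,
 *                             int elt_size, const void *elt)
 *   {
 *           if (h->prod - h->cons == h->num_items)
 *                   return false;    // full: caller must wait
 *           memcpy(elts + (h->prod & (h->num_items - 1)) * elt_size,
 *                  elt, elt_size);
 *           h->prod++;               // publish only after the copy
 *           return true;
 *   }
 */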
header->cons + 1; + mb(); + spin_unlock_irqrestore(&ring->lock, flags); + if (!drm_can_sleep()) { + while (!qxl_check_header(ring)) + udelay(1); + } else { + if (interruptible) { + ret = wait_event_interruptible(*ring->push_event, + qxl_check_header(ring)); + if (ret) + return ret; + } else { + wait_event(*ring->push_event, + qxl_check_header(ring)); + } + + } + spin_lock_irqsave(&ring->lock, flags); + } + + idx = header->prod & (ring->n_elements - 1); + elt = ring->ring->elements + idx * ring->element_size; + + memcpy((void *)elt, new_elt, ring->element_size); + + header->prod++; + + mb(); + + if (header->prod == header->notify_on_prod) + outb(0, ring->prod_notify); + + spin_unlock_irqrestore(&ring->lock, flags); + return 0; +} + +static bool qxl_ring_pop(struct qxl_ring *ring, + void *element) +{ + volatile struct qxl_ring_header *header = &(ring->ring->header); + volatile uint8_t *ring_elt; + int idx; + unsigned long flags; + + spin_lock_irqsave(&ring->lock, flags); + if (header->cons == header->prod) { + header->notify_on_prod = header->cons + 1; + spin_unlock_irqrestore(&ring->lock, flags); + return false; + } + + idx = header->cons & (ring->n_elements - 1); + ring_elt = ring->ring->elements + idx * ring->element_size; + + memcpy(element, (void *)ring_elt, ring->element_size); + + header->cons++; + + spin_unlock_irqrestore(&ring->lock, flags); + return true; +} + +int +qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, + uint32_t type, bool interruptible) +{ + struct qxl_command cmd; + + cmd.type = type; + cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); + + return qxl_ring_push(qdev->command_ring, &cmd, interruptible); +} + +int +qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, + uint32_t type, bool interruptible) +{ + struct qxl_command cmd; + + cmd.type = type; + cmd.data = qxl_bo_physical_address(qdev, release->release_bo, release->release_offset); + + return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); +} + +bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush) +{ + if (!qxl_check_idle(qdev->release_ring)) { + schedule_work(&qdev->gc_work); + if (flush) + flush_work(&qdev->gc_work); + return true; + } + return false; +} + +int qxl_garbage_collect(struct qxl_device *qdev) +{ + struct qxl_release *release; + uint64_t id, next_id; + int i = 0; + union qxl_release_info *info; + + while (qxl_ring_pop(qdev->release_ring, &id)) { + DRM_DEBUG_DRIVER("popped %lld\n", id); + while (id) { + release = qxl_release_from_id_locked(qdev, id); + if (release == NULL) + break; + + info = qxl_release_map(qdev, release); + next_id = info->next; + qxl_release_unmap(qdev, release, info); + + DRM_DEBUG_DRIVER("popped %lld, next %lld\n", id, + next_id); + + switch (release->type) { + case QXL_RELEASE_DRAWABLE: + case QXL_RELEASE_SURFACE_CMD: + case QXL_RELEASE_CURSOR_CMD: + break; + default: + DRM_ERROR("unexpected release type\n"); + break; + } + id = next_id; + + qxl_release_free(qdev, release); + ++i; + } + } + + wake_up_all(&qdev->release_event); + DRM_DEBUG_DRIVER("%d\n", i); + + return i; +} + +int qxl_alloc_bo_reserved(struct qxl_device *qdev, + struct qxl_release *release, + unsigned long size, + struct qxl_bo **_bo) +{ + struct qxl_bo *bo; + int ret; + + ret = qxl_bo_create(qdev, size, false /* not kernel - device */, + false, QXL_GEM_DOMAIN_VRAM, 0, NULL, &bo); + if (ret) { + DRM_ERROR("failed to allocate VRAM BO\n"); + return ret; + } + ret = 
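/*
 * qxl_release_list_add() below ties the fresh bo to the release, so
 * qxl_release_fence_buffer_objects() can later attach the release's
 * fence to it; the bo is then kept alive until the host hands the
 * release id back on the release ring and qxl_garbage_collect()
 * frees it.  As the GC loop above shows, ids chain through
 * info->next, so one ring entry can return a whole list:
 *
 *   popped id -> release A (info->next = id B)
 *             -> release B (info->next = id C)
 *             -> release C (info->next = 0)      // end of chain
 */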
qxl_release_list_add(release, bo); + if (ret) + goto out_unref; + + *_bo = bo; + return 0; +out_unref: + qxl_bo_unref(&bo); + return ret; +} + +static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) +{ + int irq_num; + long addr = qdev->io_base + port; + int ret; + + mutex_lock(&qdev->async_io_mutex); + irq_num = atomic_read(&qdev->irq_received_io_cmd); + if (qdev->last_sent_io_cmd > irq_num) { + if (intr) + ret = wait_event_interruptible_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + else + ret = wait_event_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + /* 0 is timeout, just bail the "hw" has gone away */ + if (ret <= 0) + goto out; + irq_num = atomic_read(&qdev->irq_received_io_cmd); + } + outb(val, addr); + qdev->last_sent_io_cmd = irq_num + 1; + if (intr) + ret = wait_event_interruptible_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); + else + ret = wait_event_timeout(qdev->io_cmd_event, + atomic_read(&qdev->irq_received_io_cmd) > irq_num, 5*HZ); +out: + if (ret > 0) + ret = 0; + mutex_unlock(&qdev->async_io_mutex); + return ret; +} + +static void wait_for_io_cmd(struct qxl_device *qdev, uint8_t val, long port) +{ + int ret; + +restart: + ret = wait_for_io_cmd_user(qdev, val, port, false); + if (ret == -ERESTARTSYS) + goto restart; +} + +int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf, + const struct qxl_rect *area) +{ + int surface_id; + uint32_t surface_width, surface_height; + int ret; + + if (!surf->hw_surf_alloc) + DRM_ERROR("got io update area with no hw surface\n"); + + if (surf->is_primary) + surface_id = 0; + else + surface_id = surf->surface_id; + surface_width = surf->surf.width; + surface_height = surf->surf.height; + + if (area->left < 0 || area->top < 0 || + area->right > surface_width || area->bottom > surface_height) + return -EINVAL; + + mutex_lock(&qdev->update_area_mutex); + qdev->ram_header->update_area = *area; + qdev->ram_header->update_surface = surface_id; + ret = wait_for_io_cmd_user(qdev, 0, QXL_IO_UPDATE_AREA_ASYNC, true); + mutex_unlock(&qdev->update_area_mutex); + return ret; +} + +void qxl_io_notify_oom(struct qxl_device *qdev) +{ + outb(0, qdev->io_base + QXL_IO_NOTIFY_OOM); +} + +void qxl_io_flush_release(struct qxl_device *qdev) +{ + outb(0, qdev->io_base + QXL_IO_FLUSH_RELEASE); +} + +void qxl_io_flush_surfaces(struct qxl_device *qdev) +{ + wait_for_io_cmd(qdev, 0, QXL_IO_FLUSH_SURFACES_ASYNC); +} + +void qxl_io_destroy_primary(struct qxl_device *qdev) +{ + wait_for_io_cmd(qdev, 0, QXL_IO_DESTROY_PRIMARY_ASYNC); + qdev->primary_bo->is_primary = false; + drm_gem_object_put(&qdev->primary_bo->tbo.base); + qdev->primary_bo = NULL; +} + +void qxl_io_create_primary(struct qxl_device *qdev, struct qxl_bo *bo) +{ + struct qxl_surface_create *create; + + if (WARN_ON(qdev->primary_bo)) + return; + + DRM_DEBUG_DRIVER("qdev %p, ram_header %p\n", qdev, qdev->ram_header); + create = &qdev->ram_header->create_surface; + create->format = bo->surf.format; + create->width = bo->surf.width; + create->height = bo->surf.height; + create->stride = bo->surf.stride; + create->mem = qxl_bo_physical_address(qdev, bo, 0); + + DRM_DEBUG_DRIVER("mem = %llx, from %p\n", create->mem, bo->kptr); + + create->flags = QXL_SURF_FLAG_KEEP_DATA; + create->type = QXL_SURF_TYPE_PRIMARY; + + wait_for_io_cmd(qdev, 0, QXL_IO_CREATE_PRIMARY_ASYNC); + qdev->primary_bo = bo; + qdev->primary_bo->is_primary = true; + 
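/*
 * The primary-surface reference is strictly paired: the
 * drm_gem_object_get() below matches the drm_gem_object_put() in
 * qxl_io_destroy_primary() above, so the bo the device scans out
 * of cannot be freed while it is still the primary.
 */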
drm_gem_object_get(&qdev->primary_bo->tbo.base); +} + +void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id) +{ + DRM_DEBUG_DRIVER("qxl_memslot_add %d\n", id); + wait_for_io_cmd(qdev, id, QXL_IO_MEMSLOT_ADD_ASYNC); +} + +void qxl_io_reset(struct qxl_device *qdev) +{ + outb(0, qdev->io_base + QXL_IO_RESET); +} + +void qxl_io_monitors_config(struct qxl_device *qdev) +{ + wait_for_io_cmd(qdev, 0, QXL_IO_MONITORS_CONFIG_ASYNC); +} + +int qxl_surface_id_alloc(struct qxl_device *qdev, + struct qxl_bo *surf) +{ + uint32_t handle; + int idr_ret; + int count = 0; +again: + idr_preload(GFP_ATOMIC); + spin_lock(&qdev->surf_id_idr_lock); + idr_ret = idr_alloc(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT); + spin_unlock(&qdev->surf_id_idr_lock); + idr_preload_end(); + if (idr_ret < 0) + return idr_ret; + handle = idr_ret; + + if (handle >= qdev->rom->n_surfaces) { + count++; + spin_lock(&qdev->surf_id_idr_lock); + idr_remove(&qdev->surf_id_idr, handle); + spin_unlock(&qdev->surf_id_idr_lock); + qxl_reap_surface_id(qdev, 2); + goto again; + } + surf->surface_id = handle; + + spin_lock(&qdev->surf_id_idr_lock); + qdev->last_alloced_surf_id = handle; + spin_unlock(&qdev->surf_id_idr_lock); + return 0; +} + +void qxl_surface_id_dealloc(struct qxl_device *qdev, + uint32_t surface_id) +{ + spin_lock(&qdev->surf_id_idr_lock); + idr_remove(&qdev->surf_id_idr, surface_id); + spin_unlock(&qdev->surf_id_idr_lock); +} + +int qxl_hw_surface_alloc(struct qxl_device *qdev, + struct qxl_bo *surf) +{ + struct qxl_surface_cmd *cmd; + struct qxl_release *release; + int ret; + + if (surf->hw_surf_alloc) + return 0; + + ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_CREATE, + NULL, + &release); + if (ret) + return ret; + + ret = qxl_release_reserve_list(release, true); + if (ret) { + qxl_release_free(qdev, release); + return ret; + } + cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_SURFACE_CMD_CREATE; + cmd->flags = QXL_SURF_FLAG_KEEP_DATA; + cmd->u.surface_create.format = surf->surf.format; + cmd->u.surface_create.width = surf->surf.width; + cmd->u.surface_create.height = surf->surf.height; + cmd->u.surface_create.stride = surf->surf.stride; + cmd->u.surface_create.data = qxl_bo_physical_address(qdev, surf, 0); + cmd->surface_id = surf->surface_id; + qxl_release_unmap(qdev, release, &cmd->release_info); + + surf->surf_create = release; + + /* no need to add a release to the fence for this surface bo, + since it is only released when we ask to destroy the surface + and it would never signal otherwise */ + qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); + + surf->hw_surf_alloc = true; + spin_lock(&qdev->surf_id_idr_lock); + idr_replace(&qdev->surf_id_idr, surf, surf->surface_id); + spin_unlock(&qdev->surf_id_idr_lock); + return 0; +} + +int qxl_hw_surface_dealloc(struct qxl_device *qdev, + struct qxl_bo *surf) +{ + struct qxl_surface_cmd *cmd; + struct qxl_release *release; + int ret; + int id; + + if (!surf->hw_surf_alloc) + return 0; + + ret = qxl_alloc_surface_release_reserved(qdev, QXL_SURFACE_CMD_DESTROY, + surf->surf_create, + &release); + if (ret) + return ret; + + surf->surf_create = NULL; + /* remove the surface from the idr, but not the surface id yet */ + spin_lock(&qdev->surf_id_idr_lock); + idr_replace(&qdev->surf_id_idr, NULL, surf->surface_id); + spin_unlock(&qdev->surf_id_idr_lock); + surf->hw_surf_alloc = false; + + id = surf->surface_id; + surf->surface_id = 0; + + 
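/*
 * The surface id is stashed on the destroy release below instead of
 * being freed here: it may only be recycled once the host has
 * processed QXL_SURFACE_CMD_DESTROY, so (as far as this patch's
 * release code goes) the teardown path in qxl_release.c is expected
 * to call qxl_surface_id_dealloc() when the release comes back on
 * the release ring.
 */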
release->surface_release_id = id; + cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_SURFACE_CMD_DESTROY; + cmd->surface_id = id; + qxl_release_unmap(qdev, release, &cmd->release_info); + + qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); + + return 0; +} + +static int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf) +{ + struct qxl_rect rect; + int ret; + + /* if we are evicting, we need to make sure the surface is up + to date */ + rect.left = 0; + rect.right = surf->surf.width; + rect.top = 0; + rect.bottom = surf->surf.height; +retry: + ret = qxl_io_update_area(qdev, surf, &rect); + if (ret == -ERESTARTSYS) + goto retry; + return ret; +} + +static void qxl_surface_evict_locked(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area) +{ + /* no need to update area if we are just freeing the surface normally */ + if (do_update_area) + qxl_update_surface(qdev, surf); + + /* nuke the surface id at the hw */ + qxl_hw_surface_dealloc(qdev, surf); +} + +void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool do_update_area) +{ + mutex_lock(&qdev->surf_evict_mutex); + qxl_surface_evict_locked(qdev, surf, do_update_area); + mutex_unlock(&qdev->surf_evict_mutex); +} + +static int qxl_reap_surf(struct qxl_device *qdev, struct qxl_bo *surf, bool stall) +{ + int ret; + + ret = qxl_bo_reserve(surf); + if (ret) + return ret; + + if (stall) + mutex_unlock(&qdev->surf_evict_mutex); + + ret = ttm_bo_wait(&surf->tbo, true, !stall); + + if (stall) + mutex_lock(&qdev->surf_evict_mutex); + if (ret) { + qxl_bo_unreserve(surf); + return ret; + } + + qxl_surface_evict_locked(qdev, surf, true); + qxl_bo_unreserve(surf); + return 0; +} + +static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap) +{ + int num_reaped = 0; + int i, ret; + bool stall = false; + int start = 0; + + mutex_lock(&qdev->surf_evict_mutex); +again: + + spin_lock(&qdev->surf_id_idr_lock); + start = qdev->last_alloced_surf_id + 1; + spin_unlock(&qdev->surf_id_idr_lock); + + for (i = start; i < start + qdev->rom->n_surfaces; i++) { + void *objptr; + int surfid = i % qdev->rom->n_surfaces; + + /* this avoids the case where the objects is in the + idr but has been evicted half way - its makes + the idr lookup atomic with the eviction */ + spin_lock(&qdev->surf_id_idr_lock); + objptr = idr_find(&qdev->surf_id_idr, surfid); + spin_unlock(&qdev->surf_id_idr_lock); + + if (!objptr) + continue; + + ret = qxl_reap_surf(qdev, objptr, stall); + if (ret == 0) + num_reaped++; + if (num_reaped >= max_to_reap) + break; + } + if (num_reaped == 0 && stall == false) { + stall = true; + goto again; + } + + mutex_unlock(&qdev->surf_evict_mutex); + if (num_reaped) { + usleep_range(500, 1000); + qxl_queue_garbage_collect(qdev, true); + } + + return 0; +} diff --git a/drivers/gpu/drm/qxl/qxl_debugfs.c b/drivers/gpu/drm/qxl/qxl_debugfs.c new file mode 100644 index 000000000..2d9ed3b94 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_debugfs.c @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2009 Red Hat + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, 
subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + * + */ + +/* + * Authors: + * Alon Levy + */ + +#include +#include + +#include "qxl_drv.h" +#include "qxl_object.h" + +#if defined(CONFIG_DEBUG_FS) +static int +qxl_debugfs_irq_received(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = to_qxl(node->minor->dev); + + seq_printf(m, "%d\n", atomic_read(&qdev->irq_received)); + seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_display)); + seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_cursor)); + seq_printf(m, "%d\n", atomic_read(&qdev->irq_received_io_cmd)); + seq_printf(m, "%d\n", qdev->irq_received_error); + return 0; +} + +static int +qxl_debugfs_buffers_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct qxl_device *qdev = to_qxl(node->minor->dev); + struct qxl_bo *bo; + + list_for_each_entry(bo, &qdev->gem.objects, list) { + struct dma_resv_iter cursor; + struct dma_fence *fence; + int rel = 0; + + dma_resv_iter_begin(&cursor, bo->tbo.base.resv, + DMA_RESV_USAGE_BOOKKEEP); + dma_resv_for_each_fence_unlocked(&cursor, fence) { + if (dma_resv_iter_is_restarted(&cursor)) + rel = 0; + ++rel; + } + + seq_printf(m, "size %ld, pc %d, num releases %d\n", + (unsigned long)bo->tbo.base.size, + bo->tbo.pin_count, rel); + } + return 0; +} + +static struct drm_info_list qxl_debugfs_list[] = { + { "irq_received", qxl_debugfs_irq_received, 0, NULL }, + { "qxl_buffers", qxl_debugfs_buffers_info, 0, NULL }, +}; +#define QXL_DEBUGFS_ENTRIES ARRAY_SIZE(qxl_debugfs_list) +#endif + +void +qxl_debugfs_init(struct drm_minor *minor) +{ +#if defined(CONFIG_DEBUG_FS) + struct qxl_device *dev = to_qxl(minor->dev); + + drm_debugfs_create_files(qxl_debugfs_list, QXL_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); + + qxl_ttm_debugfs_init(dev); +#endif +} + +void qxl_debugfs_add_files(struct qxl_device *qdev, + struct drm_info_list *files, + unsigned int nfiles) +{ + unsigned int i; + + for (i = 0; i < qdev->debugfs_count; i++) { + if (qdev->debugfs[i].files == files) { + /* Already registered */ + return; + } + } + + i = qdev->debugfs_count + 1; + if (i > QXL_DEBUGFS_MAX_COMPONENTS) { + DRM_ERROR("Reached maximum number of debugfs components.\n"); + DRM_ERROR("Report so we increase QXL_DEBUGFS_MAX_COMPONENTS.\n"); + return; + } + qdev->debugfs[qdev->debugfs_count].files = files; + qdev->debugfs[qdev->debugfs_count].num_files = nfiles; + qdev->debugfs_count = i; +#if defined(CONFIG_DEBUG_FS) + drm_debugfs_create_files(files, nfiles, + qdev->ddev.primary->debugfs_root, + qdev->ddev.primary); +#endif +} diff --git a/drivers/gpu/drm/qxl/qxl_dev.h b/drivers/gpu/drm/qxl/qxl_dev.h new file mode 100644 index 000000000..06caa61b5 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_dev.h @@ -0,0 +1,876 @@ +/* + Copyright 
(C) 2009 Red Hat, Inc. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions are + met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER AND CONTRIBUTORS "AS + IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A + PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef H_QXL_DEV +#define H_QXL_DEV + +#include + +/* + * from spice-protocol + * Release 0.10.0 + */ + +/* enums.h */ + +enum SpiceImageType { + SPICE_IMAGE_TYPE_BITMAP, + SPICE_IMAGE_TYPE_QUIC, + SPICE_IMAGE_TYPE_RESERVED, + SPICE_IMAGE_TYPE_LZ_PLT = 100, + SPICE_IMAGE_TYPE_LZ_RGB, + SPICE_IMAGE_TYPE_GLZ_RGB, + SPICE_IMAGE_TYPE_FROM_CACHE, + SPICE_IMAGE_TYPE_SURFACE, + SPICE_IMAGE_TYPE_JPEG, + SPICE_IMAGE_TYPE_FROM_CACHE_LOSSLESS, + SPICE_IMAGE_TYPE_ZLIB_GLZ_RGB, + SPICE_IMAGE_TYPE_JPEG_ALPHA, + + SPICE_IMAGE_TYPE_ENUM_END +}; + +enum SpiceBitmapFmt { + SPICE_BITMAP_FMT_INVALID, + SPICE_BITMAP_FMT_1BIT_LE, + SPICE_BITMAP_FMT_1BIT_BE, + SPICE_BITMAP_FMT_4BIT_LE, + SPICE_BITMAP_FMT_4BIT_BE, + SPICE_BITMAP_FMT_8BIT, + SPICE_BITMAP_FMT_16BIT, + SPICE_BITMAP_FMT_24BIT, + SPICE_BITMAP_FMT_32BIT, + SPICE_BITMAP_FMT_RGBA, + + SPICE_BITMAP_FMT_ENUM_END +}; + +enum SpiceSurfaceFmt { + SPICE_SURFACE_FMT_INVALID, + SPICE_SURFACE_FMT_1_A, + SPICE_SURFACE_FMT_8_A = 8, + SPICE_SURFACE_FMT_16_555 = 16, + SPICE_SURFACE_FMT_32_xRGB = 32, + SPICE_SURFACE_FMT_16_565 = 80, + SPICE_SURFACE_FMT_32_ARGB = 96, + + SPICE_SURFACE_FMT_ENUM_END +}; + +enum SpiceClipType { + SPICE_CLIP_TYPE_NONE, + SPICE_CLIP_TYPE_RECTS, + + SPICE_CLIP_TYPE_ENUM_END +}; + +enum SpiceRopd { + SPICE_ROPD_INVERS_SRC = (1 << 0), + SPICE_ROPD_INVERS_BRUSH = (1 << 1), + SPICE_ROPD_INVERS_DEST = (1 << 2), + SPICE_ROPD_OP_PUT = (1 << 3), + SPICE_ROPD_OP_OR = (1 << 4), + SPICE_ROPD_OP_AND = (1 << 5), + SPICE_ROPD_OP_XOR = (1 << 6), + SPICE_ROPD_OP_BLACKNESS = (1 << 7), + SPICE_ROPD_OP_WHITENESS = (1 << 8), + SPICE_ROPD_OP_INVERS = (1 << 9), + SPICE_ROPD_INVERS_RES = (1 << 10), + + SPICE_ROPD_MASK = 0x7ff +}; + +enum SpiceBrushType { + SPICE_BRUSH_TYPE_NONE, + SPICE_BRUSH_TYPE_SOLID, + SPICE_BRUSH_TYPE_PATTERN, + + SPICE_BRUSH_TYPE_ENUM_END +}; + +enum SpiceCursorType { + SPICE_CURSOR_TYPE_ALPHA, + SPICE_CURSOR_TYPE_MONO, + SPICE_CURSOR_TYPE_COLOR4, + SPICE_CURSOR_TYPE_COLOR8, + SPICE_CURSOR_TYPE_COLOR16, + SPICE_CURSOR_TYPE_COLOR24, + SPICE_CURSOR_TYPE_COLOR32, + + 
SPICE_CURSOR_TYPE_ENUM_END +}; + +/* qxl_dev.h */ + +#pragma pack(push, 1) + +/* 0x100-0x11f reserved for spice, 0x1ff used for unstable work */ +#define QXL_DEVICE_ID_STABLE 0x0100 + +enum { + QXL_REVISION_STABLE_V04 = 0x01, + QXL_REVISION_STABLE_V06 = 0x02, + QXL_REVISION_STABLE_V10 = 0x03, + QXL_REVISION_STABLE_V12 = 0x04, +}; + +#define QXL_DEVICE_ID_DEVEL 0x01ff +#define QXL_REVISION_DEVEL 0x01 + +#define QXL_ROM_MAGIC (*(uint32_t *)"QXRO") +#define QXL_RAM_MAGIC (*(uint32_t *)"QXRA") + +enum { + QXL_RAM_RANGE_INDEX, + QXL_VRAM_RANGE_INDEX, + QXL_ROM_RANGE_INDEX, + QXL_IO_RANGE_INDEX, + + QXL_PCI_RANGES +}; + +/* qxl-1 compat: append only */ +enum { + QXL_IO_NOTIFY_CMD, + QXL_IO_NOTIFY_CURSOR, + QXL_IO_UPDATE_AREA, + QXL_IO_UPDATE_IRQ, + QXL_IO_NOTIFY_OOM, + QXL_IO_RESET, + QXL_IO_SET_MODE, /* qxl-1 */ + QXL_IO_LOG, + /* appended for qxl-2 */ + QXL_IO_MEMSLOT_ADD, + QXL_IO_MEMSLOT_DEL, + QXL_IO_DETACH_PRIMARY, + QXL_IO_ATTACH_PRIMARY, + QXL_IO_CREATE_PRIMARY, + QXL_IO_DESTROY_PRIMARY, + QXL_IO_DESTROY_SURFACE_WAIT, + QXL_IO_DESTROY_ALL_SURFACES, + /* appended for qxl-3 */ + QXL_IO_UPDATE_AREA_ASYNC, + QXL_IO_MEMSLOT_ADD_ASYNC, + QXL_IO_CREATE_PRIMARY_ASYNC, + QXL_IO_DESTROY_PRIMARY_ASYNC, + QXL_IO_DESTROY_SURFACE_ASYNC, + QXL_IO_DESTROY_ALL_SURFACES_ASYNC, + QXL_IO_FLUSH_SURFACES_ASYNC, + QXL_IO_FLUSH_RELEASE, + /* appended for qxl-4 */ + QXL_IO_MONITORS_CONFIG_ASYNC, + + QXL_IO_RANGE_SIZE +}; + +typedef uint64_t QXLPHYSICAL; +typedef int32_t QXLFIXED; /* fixed 28.4 */ + +struct qxl_point_fix { + QXLFIXED x; + QXLFIXED y; +}; + +struct qxl_point { + int32_t x; + int32_t y; +}; + +struct qxl_point_1_6 { + int16_t x; + int16_t y; +}; + +struct qxl_rect { + int32_t top; + int32_t left; + int32_t bottom; + int32_t right; +}; + +struct qxl_urect { + uint32_t top; + uint32_t left; + uint32_t bottom; + uint32_t right; +}; + +/* qxl-1 compat: append only */ +struct qxl_rom { + uint32_t magic; + uint32_t id; + uint32_t update_id; + uint32_t compression_level; + uint32_t log_level; + uint32_t mode; /* qxl-1 */ + uint32_t modes_offset; + uint32_t num_io_pages; + uint32_t pages_offset; /* qxl-1 */ + uint32_t draw_area_offset; /* qxl-1 */ + uint32_t surface0_area_size; /* qxl-1 name: draw_area_size */ + uint32_t ram_header_offset; + uint32_t mm_clock; + /* appended for qxl-2 */ + uint32_t n_surfaces; + uint64_t flags; + uint8_t slots_start; + uint8_t slots_end; + uint8_t slot_gen_bits; + uint8_t slot_id_bits; + uint8_t slot_generation; + /* appended for qxl-4 */ + uint8_t client_present; + uint8_t client_capabilities[58]; + uint32_t client_monitors_config_crc; + struct { + uint16_t count; + uint16_t padding; + struct qxl_urect heads[64]; + } client_monitors_config; +}; + +/* qxl-1 compat: fixed */ +struct qxl_mode { + uint32_t id; + uint32_t x_res; + uint32_t y_res; + uint32_t bits; + uint32_t stride; + uint32_t x_mili; + uint32_t y_mili; + uint32_t orientation; +}; + +/* qxl-1 compat: fixed */ +struct qxl_modes { + uint32_t n_modes; + struct qxl_mode modes[]; +}; + +/* qxl-1 compat: append only */ +enum qxl_cmd_type { + QXL_CMD_NOP, + QXL_CMD_DRAW, + QXL_CMD_UPDATE, + QXL_CMD_CURSOR, + QXL_CMD_MESSAGE, + QXL_CMD_SURFACE, +}; + +/* qxl-1 compat: fixed */ +struct qxl_command { + QXLPHYSICAL data; + uint32_t type; + uint32_t padding; +}; + +#define QXL_COMMAND_FLAG_COMPAT (1<<0) +#define QXL_COMMAND_FLAG_COMPAT_16BPP (2<<0) + +struct qxl_command_ext { + struct qxl_command cmd; + uint32_t group_id; + uint32_t flags; +}; + +struct qxl_mem_slot { + uint64_t mem_start; + uint64_t mem_end; +}; + +#define 
QXL_SURF_TYPE_PRIMARY 0 + +#define QXL_SURF_FLAG_KEEP_DATA (1 << 0) + +struct qxl_surface_create { + uint32_t width; + uint32_t height; + int32_t stride; + uint32_t format; + uint32_t position; + uint32_t mouse_mode; + uint32_t flags; + uint32_t type; + QXLPHYSICAL mem; +}; + +#define QXL_COMMAND_RING_SIZE 32 +#define QXL_CURSOR_RING_SIZE 32 +#define QXL_RELEASE_RING_SIZE 8 + +#define QXL_LOG_BUF_SIZE 4096 + +#define QXL_INTERRUPT_DISPLAY (1 << 0) +#define QXL_INTERRUPT_CURSOR (1 << 1) +#define QXL_INTERRUPT_IO_CMD (1 << 2) +#define QXL_INTERRUPT_ERROR (1 << 3) +#define QXL_INTERRUPT_CLIENT (1 << 4) +#define QXL_INTERRUPT_CLIENT_MONITORS_CONFIG (1 << 5) + +struct qxl_ring_header { + uint32_t num_items; + uint32_t prod; + uint32_t notify_on_prod; + uint32_t cons; + uint32_t notify_on_cons; +}; + +/* qxl-1 compat: append only */ +struct qxl_ram_header { + uint32_t magic; + uint32_t int_pending; + uint32_t int_mask; + uint8_t log_buf[QXL_LOG_BUF_SIZE]; + struct qxl_ring_header cmd_ring_hdr; + struct qxl_command cmd_ring[QXL_COMMAND_RING_SIZE]; + struct qxl_ring_header cursor_ring_hdr; + struct qxl_command cursor_ring[QXL_CURSOR_RING_SIZE]; + struct qxl_ring_header release_ring_hdr; + uint64_t release_ring[QXL_RELEASE_RING_SIZE]; + struct qxl_rect update_area; + /* appended for qxl-2 */ + uint32_t update_surface; + struct qxl_mem_slot mem_slot; + struct qxl_surface_create create_surface; + uint64_t flags; + + /* appended for qxl-4 */ + + /* used by QXL_IO_MONITORS_CONFIG_ASYNC */ + QXLPHYSICAL monitors_config; + uint8_t guest_capabilities[64]; +}; + +union qxl_release_info { + uint64_t id; /* in */ + uint64_t next; /* out */ +}; + +struct qxl_release_info_ext { + union qxl_release_info *info; + uint32_t group_id; +}; + +struct qxl_data_chunk { + uint32_t data_size; + QXLPHYSICAL prev_chunk; + QXLPHYSICAL next_chunk; + uint8_t data[]; +}; + +struct qxl_message { + union qxl_release_info release_info; + uint8_t data[]; +}; + +struct qxl_compat_update_cmd { + union qxl_release_info release_info; + struct qxl_rect area; + uint32_t update_id; +}; + +struct qxl_update_cmd { + union qxl_release_info release_info; + struct qxl_rect area; + uint32_t update_id; + uint32_t surface_id; +}; + +struct qxl_cursor_header { + uint64_t unique; + uint16_t type; + uint16_t width; + uint16_t height; + uint16_t hot_spot_x; + uint16_t hot_spot_y; +}; + +struct qxl_cursor { + struct qxl_cursor_header header; + uint32_t data_size; + struct qxl_data_chunk chunk; +}; + +enum { + QXL_CURSOR_SET, + QXL_CURSOR_MOVE, + QXL_CURSOR_HIDE, + QXL_CURSOR_TRAIL, +}; + +#define QXL_CURSOR_DEVICE_DATA_SIZE 128 + +struct qxl_cursor_cmd { + union qxl_release_info release_info; + uint8_t type; + union { + struct { + struct qxl_point_1_6 position; + uint8_t visible; + QXLPHYSICAL shape; + } set; + struct { + uint16_t length; + uint16_t frequency; + } trail; + struct qxl_point_1_6 position; + } u; + /* todo: dynamic size from rom */ + uint8_t device_data[QXL_CURSOR_DEVICE_DATA_SIZE]; +}; + +enum { + QXL_DRAW_NOP, + QXL_DRAW_FILL, + QXL_DRAW_OPAQUE, + QXL_DRAW_COPY, + QXL_COPY_BITS, + QXL_DRAW_BLEND, + QXL_DRAW_BLACKNESS, + QXL_DRAW_WHITENESS, + QXL_DRAW_INVERS, + QXL_DRAW_ROP3, + QXL_DRAW_STROKE, + QXL_DRAW_TEXT, + QXL_DRAW_TRANSPARENT, + QXL_DRAW_ALPHA_BLEND, + QXL_DRAW_COMPOSITE +}; + +struct qxl_raster_glyph { + struct qxl_point render_pos; + struct qxl_point glyph_origin; + uint16_t width; + uint16_t height; + uint8_t data[]; +}; + +struct qxl_string { + uint32_t data_size; + uint16_t length; + uint16_t flags; + struct qxl_data_chunk 
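/*
 * Payloads larger than one contiguous allocation travel as a chain
 * of qxl_data_chunk blocks: prev_chunk/next_chunk hold QXLPHYSICAL
 * device addresses and data_size counts only that chunk's bytes.
 * Illustratively, a string split over two chunks looks like:
 *
 *   chunk 0: prev_chunk = 0,        next_chunk -> chunk 1
 *   chunk 1: prev_chunk -> chunk 0, next_chunk = 0
 *
 * while qxl_string.data_size carries the total across the chain.
 */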
chunk; +}; + +struct qxl_copy_bits { + struct qxl_point src_pos; +}; + +enum qxl_effect_type { + QXL_EFFECT_BLEND = 0, + QXL_EFFECT_OPAQUE = 1, + QXL_EFFECT_REVERT_ON_DUP = 2, + QXL_EFFECT_BLACKNESS_ON_DUP = 3, + QXL_EFFECT_WHITENESS_ON_DUP = 4, + QXL_EFFECT_NOP_ON_DUP = 5, + QXL_EFFECT_NOP = 6, + QXL_EFFECT_OPAQUE_BRUSH = 7 +}; + +struct qxl_pattern { + QXLPHYSICAL pat; + struct qxl_point pos; +}; + +struct qxl_brush { + uint32_t type; + union { + uint32_t color; + struct qxl_pattern pattern; + } u; +}; + +struct qxl_q_mask { + uint8_t flags; + struct qxl_point pos; + QXLPHYSICAL bitmap; +}; + +struct qxl_fill { + struct qxl_brush brush; + uint16_t rop_descriptor; + struct qxl_q_mask mask; +}; + +struct qxl_opaque { + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; + struct qxl_brush brush; + uint16_t rop_descriptor; + uint8_t scale_mode; + struct qxl_q_mask mask; +}; + +struct qxl_copy { + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; + uint16_t rop_descriptor; + uint8_t scale_mode; + struct qxl_q_mask mask; +}; + +struct qxl_transparent { + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; + uint32_t src_color; + uint32_t true_color; +}; + +struct qxl_alpha_blend { + uint16_t alpha_flags; + uint8_t alpha; + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; +}; + +struct qxl_compat_alpha_blend { + uint8_t alpha; + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; +}; + +struct qxl_rop_3 { + QXLPHYSICAL src_bitmap; + struct qxl_rect src_area; + struct qxl_brush brush; + uint8_t rop3; + uint8_t scale_mode; + struct qxl_q_mask mask; +}; + +struct qxl_line_attr { + uint8_t flags; + uint8_t join_style; + uint8_t end_style; + uint8_t style_nseg; + QXLFIXED width; + QXLFIXED miter_limit; + QXLPHYSICAL style; +}; + +struct qxl_stroke { + QXLPHYSICAL path; + struct qxl_line_attr attr; + struct qxl_brush brush; + uint16_t fore_mode; + uint16_t back_mode; +}; + +struct qxl_text { + QXLPHYSICAL str; + struct qxl_rect back_area; + struct qxl_brush fore_brush; + struct qxl_brush back_brush; + uint16_t fore_mode; + uint16_t back_mode; +}; + +struct qxl_mask { + struct qxl_q_mask mask; +}; + +struct qxl_clip { + uint32_t type; + QXLPHYSICAL data; +}; + +enum qxl_operator { + QXL_OP_CLEAR = 0x00, + QXL_OP_SOURCE = 0x01, + QXL_OP_DST = 0x02, + QXL_OP_OVER = 0x03, + QXL_OP_OVER_REVERSE = 0x04, + QXL_OP_IN = 0x05, + QXL_OP_IN_REVERSE = 0x06, + QXL_OP_OUT = 0x07, + QXL_OP_OUT_REVERSE = 0x08, + QXL_OP_ATOP = 0x09, + QXL_OP_ATOP_REVERSE = 0x0a, + QXL_OP_XOR = 0x0b, + QXL_OP_ADD = 0x0c, + QXL_OP_SATURATE = 0x0d, + /* Note the jump here from 0x0d to 0x30 */ + QXL_OP_MULTIPLY = 0x30, + QXL_OP_SCREEN = 0x31, + QXL_OP_OVERLAY = 0x32, + QXL_OP_DARKEN = 0x33, + QXL_OP_LIGHTEN = 0x34, + QXL_OP_COLOR_DODGE = 0x35, + QXL_OP_COLOR_BURN = 0x36, + QXL_OP_HARD_LIGHT = 0x37, + QXL_OP_SOFT_LIGHT = 0x38, + QXL_OP_DIFFERENCE = 0x39, + QXL_OP_EXCLUSION = 0x3a, + QXL_OP_HSL_HUE = 0x3b, + QXL_OP_HSL_SATURATION = 0x3c, + QXL_OP_HSL_COLOR = 0x3d, + QXL_OP_HSL_LUMINOSITY = 0x3e +}; + +struct qxl_transform { + uint32_t t00; + uint32_t t01; + uint32_t t02; + uint32_t t10; + uint32_t t11; + uint32_t t12; +}; + +/* The flags field has the following bit fields: + * + * operator: [ 0 - 7 ] + * src_filter: [ 8 - 10 ] + * mask_filter: [ 11 - 13 ] + * src_repeat: [ 14 - 15 ] + * mask_repeat: [ 16 - 17 ] + * component_alpha: [ 18 - 18 ] + * reserved: [ 19 - 31 ] + * + * The repeat and filter values are those of pixman: + * REPEAT_NONE = 0 + * REPEAT_NORMAL = 1 + * REPEAT_PAD = 2 + * REPEAT_REFLECT = 3 + * + * The filter values 
are: + * FILTER_NEAREST = 0 + * FILTER_BILINEAR = 1 + */ +struct qxl_composite { + uint32_t flags; + + QXLPHYSICAL src; + QXLPHYSICAL src_transform; /* May be NULL */ + QXLPHYSICAL mask; /* May be NULL */ + QXLPHYSICAL mask_transform; /* May be NULL */ + struct qxl_point_1_6 src_origin; + struct qxl_point_1_6 mask_origin; +}; + +struct qxl_compat_drawable { + union qxl_release_info release_info; + uint8_t effect; + uint8_t type; + uint16_t bitmap_offset; + struct qxl_rect bitmap_area; + struct qxl_rect bbox; + struct qxl_clip clip; + uint32_t mm_time; + union { + struct qxl_fill fill; + struct qxl_opaque opaque; + struct qxl_copy copy; + struct qxl_transparent transparent; + struct qxl_compat_alpha_blend alpha_blend; + struct qxl_copy_bits copy_bits; + struct qxl_copy blend; + struct qxl_rop_3 rop3; + struct qxl_stroke stroke; + struct qxl_text text; + struct qxl_mask blackness; + struct qxl_mask invers; + struct qxl_mask whiteness; + } u; +}; + +struct qxl_drawable { + union qxl_release_info release_info; + uint32_t surface_id; + uint8_t effect; + uint8_t type; + uint8_t self_bitmap; + struct qxl_rect self_bitmap_area; + struct qxl_rect bbox; + struct qxl_clip clip; + uint32_t mm_time; + int32_t surfaces_dest[3]; + struct qxl_rect surfaces_rects[3]; + union { + struct qxl_fill fill; + struct qxl_opaque opaque; + struct qxl_copy copy; + struct qxl_transparent transparent; + struct qxl_alpha_blend alpha_blend; + struct qxl_copy_bits copy_bits; + struct qxl_copy blend; + struct qxl_rop_3 rop3; + struct qxl_stroke stroke; + struct qxl_text text; + struct qxl_mask blackness; + struct qxl_mask invers; + struct qxl_mask whiteness; + struct qxl_composite composite; + } u; +}; + +enum qxl_surface_cmd_type { + QXL_SURFACE_CMD_CREATE, + QXL_SURFACE_CMD_DESTROY, +}; + +struct qxl_surface { + uint32_t format; + uint32_t width; + uint32_t height; + int32_t stride; + QXLPHYSICAL data; +}; + +struct qxl_surface_cmd { + union qxl_release_info release_info; + uint32_t surface_id; + uint8_t type; + uint32_t flags; + union { + struct qxl_surface surface_create; + } u; +}; + +struct qxl_clip_rects { + uint32_t num_rects; + struct qxl_data_chunk chunk; +}; + +enum { + QXL_PATH_BEGIN = (1 << 0), + QXL_PATH_END = (1 << 1), + QXL_PATH_CLOSE = (1 << 3), + QXL_PATH_BEZIER = (1 << 4), +}; + +struct qxl_path_seg { + uint32_t flags; + uint32_t count; + struct qxl_point_fix points[]; +}; + +struct qxl_path { + uint32_t data_size; + struct qxl_data_chunk chunk; +}; + +enum { + QXL_IMAGE_GROUP_DRIVER, + QXL_IMAGE_GROUP_DEVICE, + QXL_IMAGE_GROUP_RED, + QXL_IMAGE_GROUP_DRIVER_DONT_CACHE, +}; + +struct qxl_image_id { + uint32_t group; + uint32_t unique; +}; + +union qxl_image_id_union { + struct qxl_image_id id; + uint64_t value; +}; + +enum qxl_image_flags { + QXL_IMAGE_CACHE = (1 << 0), + QXL_IMAGE_HIGH_BITS_SET = (1 << 1), +}; + +enum qxl_bitmap_flags { + QXL_BITMAP_DIRECT = (1 << 0), + QXL_BITMAP_UNSTABLE = (1 << 1), + QXL_BITMAP_TOP_DOWN = (1 << 2), /* == SPICE_BITMAP_FLAGS_TOP_DOWN */ +}; + +#define QXL_SET_IMAGE_ID(image, _group, _unique) { \ + (image)->descriptor.id = (((uint64_t)_unique) << 32) | _group; \ +} + +struct qxl_image_descriptor { + uint64_t id; + uint8_t type; + uint8_t flags; + uint32_t width; + uint32_t height; +}; + +struct qxl_palette { + uint64_t unique; + uint16_t num_ents; + uint32_t ents[]; +}; + +struct qxl_bitmap { + uint8_t format; + uint8_t flags; + uint32_t x; + uint32_t y; + uint32_t stride; + QXLPHYSICAL palette; + QXLPHYSICAL data; /* data[0] ? 
*/ +}; + +struct qxl_surface_id { + uint32_t surface_id; +}; + +struct qxl_encoder_data { + uint32_t data_size; + uint8_t data[]; +}; + +struct qxl_image { + struct qxl_image_descriptor descriptor; + union { /* variable length */ + struct qxl_bitmap bitmap; + struct qxl_encoder_data quic; + struct qxl_surface_id surface_image; + } u; +}; + +/* A QXLHead is a single monitor output backed by a QXLSurface. + * x and y offsets are unsigned since they are used in relation to + * the given surface, not the same as the x, y coordinates in the guest + * screen reference frame. */ +struct qxl_head { + uint32_t id; + uint32_t surface_id; + uint32_t width; + uint32_t height; + uint32_t x; + uint32_t y; + uint32_t flags; +}; + +struct qxl_monitors_config { + uint16_t count; + uint16_t max_allowed; /* If it is 0 no fixed limit is given by the + driver */ + struct qxl_head heads[]; +}; + +#pragma pack(pop) + +#endif /* _H_QXL_DEV */ diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c new file mode 100644 index 000000000..f91a86225 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -0,0 +1,1291 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alon Levy + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qxl_drv.h" +#include "qxl_object.h" + +static bool qxl_head_enabled(struct qxl_head *head) +{ + return head->width && head->height; +} + +static int qxl_alloc_client_monitors_config(struct qxl_device *qdev, + unsigned int count) +{ + if (qdev->client_monitors_config && + count > qdev->client_monitors_config->count) { + kfree(qdev->client_monitors_config); + qdev->client_monitors_config = NULL; + } + if (!qdev->client_monitors_config) { + qdev->client_monitors_config = kzalloc( + struct_size(qdev->client_monitors_config, + heads, count), GFP_KERNEL); + if (!qdev->client_monitors_config) + return -ENOMEM; + } + qdev->client_monitors_config->count = count; + return 0; +} + +enum { + MONITORS_CONFIG_MODIFIED, + MONITORS_CONFIG_UNCHANGED, + MONITORS_CONFIG_BAD_CRC, + MONITORS_CONFIG_ERROR, +}; + +static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev) +{ + int i; + int num_monitors; + uint32_t crc; + int status = MONITORS_CONFIG_UNCHANGED; + + num_monitors = qdev->rom->client_monitors_config.count; + crc = crc32(0, (const uint8_t *)&qdev->rom->client_monitors_config, + sizeof(qdev->rom->client_monitors_config)); + if (crc != qdev->rom->client_monitors_config_crc) + return MONITORS_CONFIG_BAD_CRC; + if (!num_monitors) { + DRM_DEBUG_KMS("no client monitors configured\n"); + return status; + } + if (num_monitors > qxl_num_crtc) { + DRM_DEBUG_KMS("client monitors list will be truncated: %d < %d\n", + qxl_num_crtc, num_monitors); + num_monitors = qxl_num_crtc; + } else { + num_monitors = qdev->rom->client_monitors_config.count; + } + if (qdev->client_monitors_config + && (num_monitors != qdev->client_monitors_config->count)) { + status = MONITORS_CONFIG_MODIFIED; + } + if (qxl_alloc_client_monitors_config(qdev, num_monitors)) { + status = MONITORS_CONFIG_ERROR; + return status; + } + /* we copy max from the client but it isn't used */ + qdev->client_monitors_config->max_allowed = qxl_num_crtc; + for (i = 0 ; i < qdev->client_monitors_config->count ; ++i) { + struct qxl_urect *c_rect = + &qdev->rom->client_monitors_config.heads[i]; + struct qxl_head *client_head = + &qdev->client_monitors_config->heads[i]; + if (client_head->x != c_rect->left) { + client_head->x = c_rect->left; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->y != c_rect->top) { + client_head->y = c_rect->top; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->width != c_rect->right - c_rect->left) { + client_head->width = c_rect->right - c_rect->left; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->height != c_rect->bottom - c_rect->top) { + client_head->height = c_rect->bottom - c_rect->top; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->surface_id != 0) { + client_head->surface_id = 0; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->id != i) { + client_head->id = i; + status = MONITORS_CONFIG_MODIFIED; + } + if (client_head->flags != 0) { + client_head->flags = 0; + status = MONITORS_CONFIG_MODIFIED; + } + DRM_DEBUG_KMS("read %dx%d+%d+%d\n", client_head->width, client_head->height, + client_head->x, client_head->y); + } + + return status; +} + +static void qxl_update_offset_props(struct qxl_device *qdev) +{ + struct drm_device *dev = &qdev->ddev; + struct drm_connector *connector; + struct qxl_output *output; + struct qxl_head *head; + + 
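/*
 * The loop below publishes the client-requested layout through the
 * standard DRM "suggested X"/"suggested Y" connector properties;
 * a display server can read those hints to place each output where
 * the spice client arranged its windows.
 */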
list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + output = drm_connector_to_qxl_output(connector); + + head = &qdev->client_monitors_config->heads[output->index]; + + drm_object_property_set_value(&connector->base, + dev->mode_config.suggested_x_property, head->x); + drm_object_property_set_value(&connector->base, + dev->mode_config.suggested_y_property, head->y); + } +} + +void qxl_display_read_client_monitors_config(struct qxl_device *qdev) +{ + struct drm_device *dev = &qdev->ddev; + struct drm_modeset_acquire_ctx ctx; + int status, retries, ret; + + for (retries = 0; retries < 10; retries++) { + status = qxl_display_copy_rom_client_monitors_config(qdev); + if (status != MONITORS_CONFIG_BAD_CRC) + break; + udelay(5); + } + if (status == MONITORS_CONFIG_ERROR) { + DRM_DEBUG_KMS("ignoring client monitors config: error"); + return; + } + if (status == MONITORS_CONFIG_BAD_CRC) { + DRM_DEBUG_KMS("ignoring client monitors config: bad crc"); + return; + } + if (status == MONITORS_CONFIG_UNCHANGED) { + DRM_DEBUG_KMS("ignoring client monitors config: unchanged"); + return; + } + + DRM_MODESET_LOCK_ALL_BEGIN(dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); + qxl_update_offset_props(qdev); + DRM_MODESET_LOCK_ALL_END(dev, ctx, ret); + if (!drm_helper_hpd_irq_event(dev)) { + /* notify that the monitor configuration changed, to + adjust at the arbitrary resolution */ + drm_kms_helper_hotplug_event(dev); + } +} + +static int qxl_check_mode(struct qxl_device *qdev, + unsigned int width, + unsigned int height) +{ + unsigned int stride; + unsigned int size; + + if (check_mul_overflow(width, 4u, &stride)) + return -EINVAL; + if (check_mul_overflow(stride, height, &size)) + return -EINVAL; + if (size > qdev->vram_size) + return -ENOMEM; + return 0; +} + +static int qxl_check_framebuffer(struct qxl_device *qdev, + struct qxl_bo *bo) +{ + return qxl_check_mode(qdev, bo->surf.width, bo->surf.height); +} + +static int qxl_add_mode(struct drm_connector *connector, + unsigned int width, + unsigned int height, + bool preferred) +{ + struct drm_device *dev = connector->dev; + struct qxl_device *qdev = to_qxl(dev); + struct drm_display_mode *mode = NULL; + int rc; + + rc = qxl_check_mode(qdev, width, height); + if (rc != 0) + return 0; + + mode = drm_cvt_mode(dev, width, height, 60, false, false, false); + if (preferred) + mode->type |= DRM_MODE_TYPE_PREFERRED; + mode->hdisplay = width; + mode->vdisplay = height; + drm_mode_set_name(mode); + drm_mode_probed_add(connector, mode); + return 1; +} + +static int qxl_add_monitors_config_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct qxl_device *qdev = to_qxl(dev); + struct qxl_output *output = drm_connector_to_qxl_output(connector); + int h = output->index; + struct qxl_head *head; + + if (!qdev->monitors_config) + return 0; + if (h >= qxl_num_crtc) + return 0; + if (!qdev->client_monitors_config) + return 0; + if (h >= qdev->client_monitors_config->count) + return 0; + + head = &qdev->client_monitors_config->heads[h]; + DRM_DEBUG_KMS("head %d is %dx%d\n", h, head->width, head->height); + + return qxl_add_mode(connector, head->width, head->height, true); +} + +static struct mode_size { + int w; + int h; +} extra_modes[] = { + { 720, 480}, + {1152, 768}, + {1280, 854}, +}; + +static int qxl_add_extra_modes(struct drm_connector *connector) +{ + int i, ret = 0; + + for (i = 0; i < ARRAY_SIZE(extra_modes); i++) + ret += qxl_add_mode(connector, + extra_modes[i].w, + extra_modes[i].h, + false); + return 
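/*
 * Every mode offered above was first vetted by qxl_check_mode(),
 * which bounds a 32bpp framebuffer against VRAM with overflow-safe
 * multiplies.  Worked example: 1280x854 needs 1280 * 4 = 5120 bytes
 * per line and 5120 * 854 = 4372480 bytes (about 4.2 MiB), so that
 * extra mode is only reported when vram_size is at least that big.
 */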
ret; +} + +static void qxl_send_monitors_config(struct qxl_device *qdev) +{ + int i; + + BUG_ON(!qdev->ram_header->monitors_config); + + if (qdev->monitors_config->count == 0) + return; + + for (i = 0 ; i < qdev->monitors_config->count ; ++i) { + struct qxl_head *head = &qdev->monitors_config->heads[i]; + + if (head->y > 8192 || head->x > 8192 || + head->width > 8192 || head->height > 8192) { + DRM_ERROR("head %d wrong: %dx%d+%d+%d\n", + i, head->width, head->height, + head->x, head->y); + return; + } + } + qxl_io_monitors_config(qdev); +} + +static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc, + const char *reason) +{ + struct drm_device *dev = crtc->dev; + struct qxl_device *qdev = to_qxl(dev); + struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); + struct qxl_head head; + int oldcount, i = qcrtc->index; + + if (!qdev->primary_bo) { + DRM_DEBUG_KMS("no primary surface, skip (%s)\n", reason); + return; + } + + if (!qdev->monitors_config || qxl_num_crtc <= i) + return; + + head.id = i; + head.flags = 0; + head.surface_id = 0; + oldcount = qdev->monitors_config->count; + if (crtc->state->active) { + struct drm_display_mode *mode = &crtc->mode; + + head.width = mode->hdisplay; + head.height = mode->vdisplay; + head.x = crtc->x; + head.y = crtc->y; + if (qdev->monitors_config->count < i + 1) + qdev->monitors_config->count = i + 1; + if (qdev->primary_bo == qdev->dumb_shadow_bo) + head.x += qdev->dumb_heads[i].x; + } else if (i > 0) { + head.width = 0; + head.height = 0; + head.x = 0; + head.y = 0; + if (qdev->monitors_config->count == i + 1) + qdev->monitors_config->count = i; + } else { + DRM_DEBUG_KMS("inactive head 0, skip (%s)\n", reason); + return; + } + + if (head.width == qdev->monitors_config->heads[i].width && + head.height == qdev->monitors_config->heads[i].height && + head.x == qdev->monitors_config->heads[i].x && + head.y == qdev->monitors_config->heads[i].y && + oldcount == qdev->monitors_config->count) + return; + + DRM_DEBUG_KMS("head %d, %dx%d, at +%d+%d, %s (%s)\n", + i, head.width, head.height, head.x, head.y, + crtc->state->active ? "on" : "off", reason); + if (oldcount != qdev->monitors_config->count) + DRM_DEBUG_KMS("active heads %d -> %d (%d total)\n", + oldcount, qdev->monitors_config->count, + qxl_num_crtc); + + qdev->monitors_config->heads[i] = head; + qdev->monitors_config->max_allowed = qxl_num_crtc; + qxl_send_monitors_config(qdev); +} + +static void qxl_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + qxl_crtc_update_monitors_config(crtc, "flush"); +} + +static void qxl_crtc_destroy(struct drm_crtc *crtc) +{ + struct qxl_crtc *qxl_crtc = to_qxl_crtc(crtc); + + qxl_bo_unref(&qxl_crtc->cursor_bo); + drm_crtc_cleanup(crtc); + kfree(qxl_crtc); +} + +static const struct drm_crtc_funcs qxl_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = qxl_crtc_destroy, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips) +{ + /* TODO: vmwgfx where this was cribbed from had locking. Why? 
*/ + struct qxl_device *qdev = to_qxl(fb->dev); + struct drm_clip_rect norect; + struct qxl_bo *qobj; + struct drm_modeset_acquire_ctx ctx; + bool is_primary; + int inc = 1, ret; + + DRM_MODESET_LOCK_ALL_BEGIN(fb->dev, ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE, ret); + + qobj = gem_to_qxl_bo(fb->obj[0]); + /* if we aren't primary surface ignore this */ + is_primary = qobj->shadow ? qobj->shadow->is_primary : qobj->is_primary; + if (!is_primary) + goto out_lock_end; + + if (!num_clips) { + num_clips = 1; + clips = &norect; + norect.x1 = norect.y1 = 0; + norect.x2 = fb->width; + norect.y2 = fb->height; + } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { + num_clips /= 2; + inc = 2; /* skip source rects */ + } + + qxl_draw_dirty_fb(qdev, fb, qobj, flags, color, + clips, num_clips, inc, 0); + +out_lock_end: + DRM_MODESET_LOCK_ALL_END(fb->dev, ctx, ret); + + return 0; +} + +static const struct drm_framebuffer_funcs qxl_fb_funcs = { + .destroy = drm_gem_fb_destroy, + .dirty = qxl_framebuffer_surface_dirty, + .create_handle = drm_gem_fb_create_handle, +}; + +static void qxl_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + qxl_crtc_update_monitors_config(crtc, "enable"); +} + +static void qxl_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + qxl_crtc_update_monitors_config(crtc, "disable"); +} + +static const struct drm_crtc_helper_funcs qxl_crtc_helper_funcs = { + .atomic_flush = qxl_crtc_atomic_flush, + .atomic_enable = qxl_crtc_atomic_enable, + .atomic_disable = qxl_crtc_atomic_disable, +}; + +static int qxl_primary_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct qxl_device *qdev = to_qxl(plane->dev); + struct qxl_bo *bo; + + if (!new_plane_state->crtc || !new_plane_state->fb) + return 0; + + bo = gem_to_qxl_bo(new_plane_state->fb->obj[0]); + + return qxl_check_framebuffer(qdev, bo); +} + +static int qxl_primary_apply_cursor(struct qxl_device *qdev, + struct drm_plane_state *plane_state) +{ + struct drm_framebuffer *fb = plane_state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc); + struct qxl_cursor_cmd *cmd; + struct qxl_release *release; + int ret = 0; + + if (!qcrtc->cursor_bo) + return 0; + + ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), + QXL_RELEASE_CURSOR_CMD, + &release, NULL); + if (ret) + return ret; + + ret = qxl_release_list_add(release, qcrtc->cursor_bo); + if (ret) + goto out_free_release; + + ret = qxl_release_reserve_list(release, false); + if (ret) + goto out_free_release; + + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_CURSOR_SET; + cmd->u.set.position.x = plane_state->crtc_x + fb->hot_x; + cmd->u.set.position.y = plane_state->crtc_y + fb->hot_y; + + cmd->u.set.shape = qxl_bo_physical_address(qdev, qcrtc->cursor_bo, 0); + + cmd->u.set.visible = 1; + qxl_release_unmap(qdev, release, &cmd->release_info); + + qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); + + return ret; + +out_free_release: + qxl_release_free(qdev, release); + return ret; +} + +static int qxl_primary_move_cursor(struct qxl_device *qdev, + struct drm_plane_state *plane_state) +{ + struct drm_framebuffer *fb = plane_state->fb; + struct qxl_crtc *qcrtc = to_qxl_crtc(plane_state->crtc); + struct qxl_cursor_cmd *cmd; + struct qxl_release *release; + int ret = 0; + + if (!qcrtc->cursor_bo) + return 0; + + 
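/*
 * The same release life cycle recurs for every command the driver
 * emits, as below: allocate a release that carries the command,
 * reserve its bo list, map and fill the command, unmap, fence the
 * bos, then push the release onto the cursor (or command) ring.
 * The host eventually echoes the release id on the release ring,
 * which is what finally frees it.
 */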
ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), + QXL_RELEASE_CURSOR_CMD, + &release, NULL); + if (ret) + return ret; + + ret = qxl_release_reserve_list(release, true); + if (ret) { + qxl_release_free(qdev, release); + return ret; + } + + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_CURSOR_MOVE; + cmd->u.position.x = plane_state->crtc_x + fb->hot_x; + cmd->u.position.y = plane_state->crtc_y + fb->hot_y; + qxl_release_unmap(qdev, release, &cmd->release_info); + + qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); + return ret; +} + +static struct qxl_bo *qxl_create_cursor(struct qxl_device *qdev, + struct qxl_bo *user_bo, + int hot_x, int hot_y) +{ + static const u32 size = 64 * 64 * 4; + struct qxl_bo *cursor_bo; + struct iosys_map cursor_map; + struct iosys_map user_map; + struct qxl_cursor cursor; + int ret; + + if (!user_bo) + return NULL; + + ret = qxl_bo_create(qdev, sizeof(struct qxl_cursor) + size, + false, true, QXL_GEM_DOMAIN_VRAM, 1, + NULL, &cursor_bo); + if (ret) + goto err; + + ret = qxl_bo_vmap(cursor_bo, &cursor_map); + if (ret) + goto err_unref; + + ret = qxl_bo_vmap(user_bo, &user_map); + if (ret) + goto err_unmap; + + cursor.header.unique = 0; + cursor.header.type = SPICE_CURSOR_TYPE_ALPHA; + cursor.header.width = 64; + cursor.header.height = 64; + cursor.header.hot_spot_x = hot_x; + cursor.header.hot_spot_y = hot_y; + cursor.data_size = size; + cursor.chunk.next_chunk = 0; + cursor.chunk.prev_chunk = 0; + cursor.chunk.data_size = size; + if (cursor_map.is_iomem) { + memcpy_toio(cursor_map.vaddr_iomem, + &cursor, sizeof(cursor)); + memcpy_toio(cursor_map.vaddr_iomem + sizeof(cursor), + user_map.vaddr, size); + } else { + memcpy(cursor_map.vaddr, + &cursor, sizeof(cursor)); + memcpy(cursor_map.vaddr + sizeof(cursor), + user_map.vaddr, size); + } + + qxl_bo_vunmap(user_bo); + qxl_bo_vunmap(cursor_bo); + return cursor_bo; + +err_unmap: + qxl_bo_vunmap(cursor_bo); +err_unref: + qxl_bo_unpin(cursor_bo); + qxl_bo_unref(&cursor_bo); +err: + return NULL; +} + +static void qxl_free_cursor(struct qxl_bo *cursor_bo) +{ + if (!cursor_bo) + return; + + qxl_bo_unpin(cursor_bo); + qxl_bo_unref(&cursor_bo); +} + +static void qxl_primary_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct qxl_device *qdev = to_qxl(plane->dev); + struct qxl_bo *bo = gem_to_qxl_bo(new_state->fb->obj[0]); + struct qxl_bo *primary; + struct drm_clip_rect norect = { + .x1 = 0, + .y1 = 0, + .x2 = new_state->fb->width, + .y2 = new_state->fb->height + }; + uint32_t dumb_shadow_offset = 0; + + primary = bo->shadow ? 
bo->shadow : bo; + + if (!primary->is_primary) { + if (qdev->primary_bo) + qxl_io_destroy_primary(qdev); + qxl_io_create_primary(qdev, primary); + qxl_primary_apply_cursor(qdev, plane->state); + } + + if (bo->is_dumb) + dumb_shadow_offset = + qdev->dumb_heads[new_state->crtc->index].x; + + qxl_draw_dirty_fb(qdev, new_state->fb, bo, 0, 0, &norect, 1, 1, + dumb_shadow_offset); +} + +static void qxl_primary_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct qxl_device *qdev = to_qxl(plane->dev); + + if (old_state->fb) { + struct qxl_bo *bo = gem_to_qxl_bo(old_state->fb->obj[0]); + + if (bo->shadow) + bo = bo->shadow; + if (bo->is_primary) + qxl_io_destroy_primary(qdev); + } +} + +static void qxl_cursor_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct qxl_device *qdev = to_qxl(plane->dev); + struct drm_framebuffer *fb = new_state->fb; + + if (fb != old_state->fb) { + qxl_primary_apply_cursor(qdev, new_state); + } else { + qxl_primary_move_cursor(qdev, new_state); + } +} + +static void qxl_cursor_atomic_disable(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct qxl_device *qdev = to_qxl(plane->dev); + struct qxl_crtc *qcrtc; + struct qxl_release *release; + struct qxl_cursor_cmd *cmd; + int ret; + + ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), + QXL_RELEASE_CURSOR_CMD, + &release, NULL); + if (ret) + return; + + ret = qxl_release_reserve_list(release, true); + if (ret) { + qxl_release_free(qdev, release); + return; + } + + cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); + cmd->type = QXL_CURSOR_HIDE; + qxl_release_unmap(qdev, release, &cmd->release_info); + + qxl_release_fence_buffer_objects(release); + qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); + + qcrtc = to_qxl_crtc(old_state->crtc); + qxl_free_cursor(qcrtc->cursor_bo); + qcrtc->cursor_bo = NULL; +} + +static void qxl_update_dumb_head(struct qxl_device *qdev, + int index, struct qxl_bo *bo) +{ + uint32_t width, height; + + if (index >= qdev->monitors_config->max_allowed) + return; + + if (bo && bo->is_dumb) { + width = bo->surf.width; + height = bo->surf.height; + } else { + width = 0; + height = 0; + } + + if (qdev->dumb_heads[index].width == width && + qdev->dumb_heads[index].height == height) + return; + + DRM_DEBUG("#%d: %dx%d -> %dx%d\n", index, + qdev->dumb_heads[index].width, + qdev->dumb_heads[index].height, + width, height); + qdev->dumb_heads[index].width = width; + qdev->dumb_heads[index].height = height; +} + +static void qxl_calc_dumb_shadow(struct qxl_device *qdev, + struct qxl_surface *surf) +{ + struct qxl_head *head; + int i; + + memset(surf, 0, sizeof(*surf)); + for (i = 0; i < qdev->monitors_config->max_allowed; i++) { + head = qdev->dumb_heads + i; + head->x = surf->width; + surf->width += head->width; + if (surf->height < head->height) + surf->height = head->height; + } + if (surf->width < 64) + surf->width = 64; + if (surf->height < 64) + surf->height = 64; + surf->format = SPICE_SURFACE_FMT_32_xRGB; + surf->stride = surf->width * 4; + + if (!qdev->dumb_shadow_bo || + qdev->dumb_shadow_bo->surf.width != surf->width || + 
qdev->dumb_shadow_bo->surf.height != surf->height) + DRM_DEBUG("%dx%d\n", surf->width, surf->height); +} + +static void qxl_prepare_shadow(struct qxl_device *qdev, struct qxl_bo *user_bo, + int crtc_index) +{ + struct qxl_surface surf; + + qxl_update_dumb_head(qdev, crtc_index, + user_bo); + qxl_calc_dumb_shadow(qdev, &surf); + if (!qdev->dumb_shadow_bo || + qdev->dumb_shadow_bo->surf.width != surf.width || + qdev->dumb_shadow_bo->surf.height != surf.height) { + if (qdev->dumb_shadow_bo) { + qxl_bo_unpin(qdev->dumb_shadow_bo); + drm_gem_object_put + (&qdev->dumb_shadow_bo->tbo.base); + qdev->dumb_shadow_bo = NULL; + } + qxl_bo_create(qdev, surf.height * surf.stride, + true, true, QXL_GEM_DOMAIN_SURFACE, 0, + &surf, &qdev->dumb_shadow_bo); + } + if (user_bo->shadow != qdev->dumb_shadow_bo) { + if (user_bo->shadow) { + qxl_bo_unpin(user_bo->shadow); + drm_gem_object_put + (&user_bo->shadow->tbo.base); + user_bo->shadow = NULL; + } + drm_gem_object_get(&qdev->dumb_shadow_bo->tbo.base); + user_bo->shadow = qdev->dumb_shadow_bo; + qxl_bo_pin(user_bo->shadow); + } +} + +static int qxl_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct qxl_device *qdev = to_qxl(plane->dev); + struct drm_gem_object *obj; + struct qxl_bo *user_bo; + int ret; + + if (!new_state->fb) + return 0; + + obj = new_state->fb->obj[0]; + user_bo = gem_to_qxl_bo(obj); + + if (plane->type == DRM_PLANE_TYPE_PRIMARY && + user_bo->is_dumb) { + qxl_prepare_shadow(qdev, user_bo, new_state->crtc->index); + } + + if (plane->type == DRM_PLANE_TYPE_CURSOR && + plane->state->fb != new_state->fb) { + struct qxl_crtc *qcrtc = to_qxl_crtc(new_state->crtc); + struct qxl_bo *old_cursor_bo = qcrtc->cursor_bo; + + qcrtc->cursor_bo = qxl_create_cursor(qdev, user_bo, + new_state->fb->hot_x, + new_state->fb->hot_y); + qxl_free_cursor(old_cursor_bo); + } + + ret = qxl_bo_pin(user_bo); + if (ret) + return ret; + + return drm_gem_plane_helper_prepare_fb(plane, new_state); +} + +static void qxl_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct drm_gem_object *obj; + struct qxl_bo *user_bo; + + if (!old_state->fb) { + /* + * we never executed prepare_fb, so there's nothing to + * unpin. 
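+ * (qxl_plane_prepare_fb() returns 0 before pinning anything when
+ * new_state->fb is NULL, so this is the matching early exit.)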
+ */ + return; + } + + obj = old_state->fb->obj[0]; + user_bo = gem_to_qxl_bo(obj); + qxl_bo_unpin(user_bo); + + if (old_state->fb != plane->state->fb && user_bo->shadow) { + qxl_bo_unpin(user_bo->shadow); + drm_gem_object_put(&user_bo->shadow->tbo.base); + user_bo->shadow = NULL; + } +} + +static const uint32_t qxl_cursor_plane_formats[] = { + DRM_FORMAT_ARGB8888, +}; + +static const struct drm_plane_helper_funcs qxl_cursor_helper_funcs = { + .atomic_update = qxl_cursor_atomic_update, + .atomic_disable = qxl_cursor_atomic_disable, + .prepare_fb = qxl_plane_prepare_fb, + .cleanup_fb = qxl_plane_cleanup_fb, +}; + +static const struct drm_plane_funcs qxl_cursor_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_helper_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static const uint32_t qxl_primary_plane_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, +}; + +static const struct drm_plane_helper_funcs primary_helper_funcs = { + .atomic_check = qxl_primary_atomic_check, + .atomic_update = qxl_primary_atomic_update, + .atomic_disable = qxl_primary_atomic_disable, + .prepare_fb = qxl_plane_prepare_fb, + .cleanup_fb = qxl_plane_cleanup_fb, +}; + +static const struct drm_plane_funcs qxl_primary_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_helper_destroy, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static struct drm_plane *qxl_create_plane(struct qxl_device *qdev, + unsigned int possible_crtcs, + enum drm_plane_type type) +{ + const struct drm_plane_helper_funcs *helper_funcs = NULL; + struct drm_plane *plane; + const struct drm_plane_funcs *funcs; + const uint32_t *formats; + int num_formats; + int err; + + if (type == DRM_PLANE_TYPE_PRIMARY) { + funcs = &qxl_primary_plane_funcs; + formats = qxl_primary_plane_formats; + num_formats = ARRAY_SIZE(qxl_primary_plane_formats); + helper_funcs = &primary_helper_funcs; + } else if (type == DRM_PLANE_TYPE_CURSOR) { + funcs = &qxl_cursor_plane_funcs; + formats = qxl_cursor_plane_formats; + helper_funcs = &qxl_cursor_helper_funcs; + num_formats = ARRAY_SIZE(qxl_cursor_plane_formats); + } else { + return ERR_PTR(-EINVAL); + } + + plane = kzalloc(sizeof(*plane), GFP_KERNEL); + if (!plane) + return ERR_PTR(-ENOMEM); + + err = drm_universal_plane_init(&qdev->ddev, plane, possible_crtcs, + funcs, formats, num_formats, + NULL, type, NULL); + if (err) + goto free_plane; + + drm_plane_helper_add(plane, helper_funcs); + + return plane; + +free_plane: + kfree(plane); + return ERR_PTR(-EINVAL); +} + +static int qdev_crtc_init(struct drm_device *dev, int crtc_id) +{ + struct qxl_crtc *qxl_crtc; + struct drm_plane *primary, *cursor; + struct qxl_device *qdev = to_qxl(dev); + int r; + + qxl_crtc = kzalloc(sizeof(struct qxl_crtc), GFP_KERNEL); + if (!qxl_crtc) + return -ENOMEM; + + primary = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_PRIMARY); + if (IS_ERR(primary)) { + r = -ENOMEM; + goto free_mem; + } + + cursor = qxl_create_plane(qdev, 1 << crtc_id, DRM_PLANE_TYPE_CURSOR); + if (IS_ERR(cursor)) { + r = -ENOMEM; + goto clean_primary; + } + + r = drm_crtc_init_with_planes(dev, 
&qxl_crtc->base, primary, cursor, + &qxl_crtc_funcs, NULL); + if (r) + goto clean_cursor; + + qxl_crtc->index = crtc_id; + drm_crtc_helper_add(&qxl_crtc->base, &qxl_crtc_helper_funcs); + return 0; + +clean_cursor: + drm_plane_cleanup(cursor); + kfree(cursor); +clean_primary: + drm_plane_cleanup(primary); + kfree(primary); +free_mem: + kfree(qxl_crtc); + return r; +} + +static int qxl_conn_get_modes(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct qxl_device *qdev = to_qxl(dev); + struct qxl_output *output = drm_connector_to_qxl_output(connector); + unsigned int pwidth = 1024; + unsigned int pheight = 768; + int ret = 0; + + if (qdev->client_monitors_config) { + struct qxl_head *head; + head = &qdev->client_monitors_config->heads[output->index]; + if (head->width) + pwidth = head->width; + if (head->height) + pheight = head->height; + } + + ret += drm_add_modes_noedid(connector, 8192, 8192); + ret += qxl_add_extra_modes(connector); + ret += qxl_add_monitors_config_modes(connector); + drm_set_preferred_mode(connector, pwidth, pheight); + return ret; +} + +static enum drm_mode_status qxl_conn_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct drm_device *ddev = connector->dev; + struct qxl_device *qdev = to_qxl(ddev); + + if (qxl_check_mode(qdev, mode->hdisplay, mode->vdisplay) != 0) + return MODE_BAD; + + return MODE_OK; +} + +static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector) +{ + struct qxl_output *qxl_output = + drm_connector_to_qxl_output(connector); + + DRM_DEBUG("\n"); + return &qxl_output->enc; +} + +static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = { + .get_modes = qxl_conn_get_modes, + .mode_valid = qxl_conn_mode_valid, + .best_encoder = qxl_best_encoder, +}; + +static enum drm_connector_status qxl_conn_detect( + struct drm_connector *connector, + bool force) +{ + struct qxl_output *output = + drm_connector_to_qxl_output(connector); + struct drm_device *ddev = connector->dev; + struct qxl_device *qdev = to_qxl(ddev); + bool connected = false; + + /* The first monitor is always connected */ + if (!qdev->client_monitors_config) { + if (output->index == 0) + connected = true; + } else + connected = qdev->client_monitors_config->count > output->index && + qxl_head_enabled(&qdev->client_monitors_config->heads[output->index]); + + DRM_DEBUG("#%d connected: %d\n", output->index, connected); + + return connected ? 
connector_status_connected + : connector_status_disconnected; +} + +static void qxl_conn_destroy(struct drm_connector *connector) +{ + struct qxl_output *qxl_output = + drm_connector_to_qxl_output(connector); + + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + kfree(qxl_output); +} + +static const struct drm_connector_funcs qxl_connector_funcs = { + .detect = qxl_conn_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = qxl_conn_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev) +{ + if (qdev->hotplug_mode_update_property) + return 0; + + qdev->hotplug_mode_update_property = + drm_property_create_range(&qdev->ddev, DRM_MODE_PROP_IMMUTABLE, + "hotplug_mode_update", 0, 1); + + return 0; +} + +static int qdev_output_init(struct drm_device *dev, int num_output) +{ + struct qxl_device *qdev = to_qxl(dev); + struct qxl_output *qxl_output; + struct drm_connector *connector; + struct drm_encoder *encoder; + int ret; + + qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL); + if (!qxl_output) + return -ENOMEM; + + qxl_output->index = num_output; + + connector = &qxl_output->base; + encoder = &qxl_output->enc; + drm_connector_init(dev, &qxl_output->base, + &qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL); + + ret = drm_simple_encoder_init(dev, &qxl_output->enc, + DRM_MODE_ENCODER_VIRTUAL); + if (ret) { + drm_err(dev, "drm_simple_encoder_init() failed, error %d\n", + ret); + goto err_drm_connector_cleanup; + } + + /* we get HPD via client monitors config */ + connector->polled = DRM_CONNECTOR_POLL_HPD; + encoder->possible_crtcs = 1 << num_output; + drm_connector_attach_encoder(&qxl_output->base, + &qxl_output->enc); + drm_connector_helper_add(connector, &qxl_connector_helper_funcs); + + drm_object_attach_property(&connector->base, + qdev->hotplug_mode_update_property, 0); + drm_object_attach_property(&connector->base, + dev->mode_config.suggested_x_property, 0); + drm_object_attach_property(&connector->base, + dev->mode_config.suggested_y_property, 0); + return 0; + +err_drm_connector_cleanup: + drm_connector_cleanup(&qxl_output->base); + kfree(qxl_output); + return ret; +} + +static struct drm_framebuffer * +qxl_user_framebuffer_create(struct drm_device *dev, + struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + return drm_gem_fb_create_with_funcs(dev, file_priv, mode_cmd, + &qxl_fb_funcs); +} + +static const struct drm_mode_config_funcs qxl_mode_funcs = { + .fb_create = qxl_user_framebuffer_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +int qxl_create_monitors_object(struct qxl_device *qdev) +{ + int ret; + struct drm_gem_object *gobj; + struct iosys_map map; + int monitors_config_size = sizeof(struct qxl_monitors_config) + + qxl_num_crtc * sizeof(struct qxl_head); + + ret = qxl_gem_object_create(qdev, monitors_config_size, 0, + QXL_GEM_DOMAIN_VRAM, + false, false, NULL, &gobj); + if (ret) { + DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); + return -ENOMEM; + } + qdev->monitors_config_bo = gem_to_qxl_bo(gobj); + + ret = qxl_bo_vmap(qdev->monitors_config_bo, &map); + if (ret) + return ret; + + qdev->monitors_config = qdev->monitors_config_bo->kptr; + qdev->ram_header->monitors_config = + qxl_bo_physical_address(qdev, 
qdev->monitors_config_bo, 0); + + memset(qdev->monitors_config, 0, monitors_config_size); + qdev->dumb_heads = kcalloc(qxl_num_crtc, sizeof(qdev->dumb_heads[0]), + GFP_KERNEL); + if (!qdev->dumb_heads) { + qxl_destroy_monitors_object(qdev); + return -ENOMEM; + } + return 0; +} + +int qxl_destroy_monitors_object(struct qxl_device *qdev) +{ + int ret; + + if (!qdev->monitors_config_bo) + return 0; + + kfree(qdev->dumb_heads); + qdev->dumb_heads = NULL; + + qdev->monitors_config = NULL; + qdev->ram_header->monitors_config = 0; + + ret = qxl_bo_vunmap(qdev->monitors_config_bo); + if (ret) + return ret; + + qxl_bo_unref(&qdev->monitors_config_bo); + return 0; +} + +int qxl_modeset_init(struct qxl_device *qdev) +{ + int i; + int ret; + + ret = drmm_mode_config_init(&qdev->ddev); + if (ret) + return ret; + + ret = qxl_create_monitors_object(qdev); + if (ret) + return ret; + + qdev->ddev.mode_config.funcs = (void *)&qxl_mode_funcs; + + /* modes will be validated against the framebuffer size */ + qdev->ddev.mode_config.min_width = 0; + qdev->ddev.mode_config.min_height = 0; + qdev->ddev.mode_config.max_width = 8192; + qdev->ddev.mode_config.max_height = 8192; + + qdev->ddev.mode_config.fb_base = qdev->vram_base; + + drm_mode_create_suggested_offset_properties(&qdev->ddev); + qxl_mode_create_hotplug_mode_update_property(qdev); + + for (i = 0 ; i < qxl_num_crtc; ++i) { + qdev_crtc_init(&qdev->ddev, i); + qdev_output_init(&qdev->ddev, i); + } + + qxl_display_read_client_monitors_config(qdev); + + drm_mode_config_reset(&qdev->ddev); + return 0; +} + +void qxl_modeset_fini(struct qxl_device *qdev) +{ + if (qdev->dumb_shadow_bo) { + qxl_bo_unpin(qdev->dumb_shadow_bo); + drm_gem_object_put(&qdev->dumb_shadow_bo->tbo.base); + qdev->dumb_shadow_bo = NULL; + } + qxl_destroy_monitors_object(qdev); +} diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c new file mode 100644 index 000000000..3a3e127ce --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -0,0 +1,268 @@ +/* + * Copyright 2011 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include + +#include +#include + +#include "qxl_drv.h" +#include "qxl_object.h" + +static int alloc_clips(struct qxl_device *qdev, + struct qxl_release *release, + unsigned int num_clips, + struct qxl_bo **clips_bo) +{ + int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips; + + return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); +} + +/* returns a pointer to the already allocated qxl_rect array inside + * the qxl_clip_rects. This is *not* the same as the memory allocated + * on the device, it is offset to qxl_clip_rects.chunk.data */ +static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, + unsigned int num_clips, + struct qxl_bo *clips_bo) +{ + struct iosys_map map; + struct qxl_clip_rects *dev_clips; + int ret; + + ret = qxl_bo_vmap_locked(clips_bo, &map); + if (ret) + return NULL; + dev_clips = map.vaddr; /* TODO: Use mapping abstraction properly */ + + dev_clips->num_rects = num_clips; + dev_clips->chunk.next_chunk = 0; + dev_clips->chunk.prev_chunk = 0; + dev_clips->chunk.data_size = sizeof(struct qxl_rect) * num_clips; + return (struct qxl_rect *)dev_clips->chunk.data; +} + +static int +alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) +{ + return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), + QXL_RELEASE_DRAWABLE, release, NULL); +} + +static void +free_drawable(struct qxl_device *qdev, struct qxl_release *release) +{ + qxl_release_free(qdev, release); +} + +/* release needs to be reserved at this point */ +static int +make_drawable(struct qxl_device *qdev, int surface, uint8_t type, + const struct qxl_rect *rect, + struct qxl_release *release) +{ + struct qxl_drawable *drawable; + int i; + + drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); + if (!drawable) + return -ENOMEM; + + drawable->type = type; + + drawable->surface_id = surface; /* Only primary for now */ + drawable->effect = QXL_EFFECT_OPAQUE; + drawable->self_bitmap = 0; + drawable->self_bitmap_area.top = 0; + drawable->self_bitmap_area.left = 0; + drawable->self_bitmap_area.bottom = 0; + drawable->self_bitmap_area.right = 0; + /* FIXME: add clipping */ + drawable->clip.type = SPICE_CLIP_TYPE_NONE; + + /* + * surfaces_dest[i] should apparently be filled out with the + * surfaces that we depend on, and surface_rects should be + * filled with the rectangles of those surfaces that we + * are going to use. + */ + for (i = 0; i < 3; ++i) + drawable->surfaces_dest[i] = -1; + + if (rect) + drawable->bbox = *rect; + + drawable->mm_time = qdev->rom->mm_clock; + qxl_release_unmap(qdev, release, &drawable->release_info); + return 0; +} + +/* push a draw command using the given clipping rectangles as + * the sources from the shadow framebuffer. + * + * Right now implementing with a single draw and a clip list. Clip + * lists are known to be a problem performance wise, this can be solved + * by treating them differently in the server. + */ +void qxl_draw_dirty_fb(struct qxl_device *qdev, + struct drm_framebuffer *fb, + struct qxl_bo *bo, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips, int inc, + uint32_t dumb_shadow_offset) +{ + /* + * TODO: if flags & DRM_MODE_FB_DIRTY_ANNOTATE_FILL then we should + * send a fill command instead, much cheaper. 
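+ * (A QXL_DRAW_FILL drawable carries no image chunks, so the image
+ * allocation and copy done below could be skipped for solid fills.)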
+ * + * See include/drm/drm_mode.h + */ + struct drm_clip_rect *clips_ptr; + int i; + int left, right, top, bottom; + int width, height; + struct qxl_drawable *drawable; + struct qxl_rect drawable_rect; + struct qxl_rect *rects; + int stride = fb->pitches[0]; + /* depth is not actually interesting, we don't mask with it */ + int depth = fb->format->cpp[0] * 8; + struct iosys_map surface_map; + uint8_t *surface_base; + struct qxl_release *release; + struct qxl_bo *clips_bo; + struct qxl_drm_image *dimage; + int ret; + + ret = alloc_drawable(qdev, &release); + if (ret) + return; + + clips->x1 += dumb_shadow_offset; + clips->x2 += dumb_shadow_offset; + + left = clips->x1; + right = clips->x2; + top = clips->y1; + bottom = clips->y2; + + /* skip the first clip rect */ + for (i = 1, clips_ptr = clips + inc; + i < num_clips; i++, clips_ptr += inc) { + left = min_t(int, left, (int)clips_ptr->x1); + right = max_t(int, right, (int)clips_ptr->x2); + top = min_t(int, top, (int)clips_ptr->y1); + bottom = max_t(int, bottom, (int)clips_ptr->y2); + } + + width = right - left; + height = bottom - top; + + ret = alloc_clips(qdev, release, num_clips, &clips_bo); + if (ret) + goto out_free_drawable; + + ret = qxl_image_alloc_objects(qdev, release, + &dimage, + height, stride); + if (ret) + goto out_free_clips; + + /* do a reservation run over all the objects we just allocated */ + ret = qxl_release_reserve_list(release, true); + if (ret) + goto out_free_image; + + drawable_rect.left = left; + drawable_rect.right = right; + drawable_rect.top = top; + drawable_rect.bottom = bottom; + + ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, + release); + if (ret) + goto out_release_backoff; + + ret = qxl_bo_vmap_locked(bo, &surface_map); + if (ret) + goto out_release_backoff; + surface_base = surface_map.vaddr; /* TODO: Use mapping abstraction properly */ + + ret = qxl_image_init(qdev, release, dimage, surface_base, + left - dumb_shadow_offset, + top, width, height, depth, stride); + qxl_bo_vunmap_locked(bo); + if (ret) + goto out_release_backoff; + + rects = drawable_set_clipping(qdev, num_clips, clips_bo); + if (!rects) { + ret = -EINVAL; + goto out_release_backoff; + } + drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); + + drawable->clip.type = SPICE_CLIP_TYPE_RECTS; + drawable->clip.data = qxl_bo_physical_address(qdev, + clips_bo, 0); + + drawable->u.copy.src_area.top = 0; + drawable->u.copy.src_area.bottom = height; + drawable->u.copy.src_area.left = 0; + drawable->u.copy.src_area.right = width; + + drawable->u.copy.rop_descriptor = SPICE_ROPD_OP_PUT; + drawable->u.copy.scale_mode = 0; + drawable->u.copy.mask.flags = 0; + drawable->u.copy.mask.pos.x = 0; + drawable->u.copy.mask.pos.y = 0; + drawable->u.copy.mask.bitmap = 0; + + drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0); + qxl_release_unmap(qdev, release, &drawable->release_info); + + clips_ptr = clips; + for (i = 0; i < num_clips; i++, clips_ptr += inc) { + rects[i].left = clips_ptr->x1; + rects[i].right = clips_ptr->x2; + rects[i].top = clips_ptr->y1; + rects[i].bottom = clips_ptr->y2; + } + qxl_bo_vunmap_locked(clips_bo); + + qxl_release_fence_buffer_objects(release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); + +out_release_backoff: + if (ret) + qxl_release_backoff_reserve_list(release); +out_free_image: + qxl_image_free_objects(qdev, dimage); +out_free_clips: + qxl_bo_unref(&clips_bo); +out_free_drawable: + /* only free drawable on error */ + if (ret) + free_drawable(qdev, 
release); + +} diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c new file mode 100644 index 000000000..3044ca948 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -0,0 +1,312 @@ +/* qxl_drv.c -- QXL driver -*- linux-c -*- + * + * Copyright 2011 Red Hat, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: + * Dave Airlie + * Alon Levy + */ + +#include "qxl_drv.h" + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qxl_object.h" + +static const struct pci_device_id pciidlist[] = { + { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, + 0xffff00, 0 }, + { 0x1b36, 0x100, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_OTHER << 8, + 0xffff00, 0 }, + { 0, 0, 0 }, +}; +MODULE_DEVICE_TABLE(pci, pciidlist); + +static int qxl_modeset = -1; +int qxl_num_crtc = 4; + +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); +module_param_named(modeset, qxl_modeset, int, 0400); + +MODULE_PARM_DESC(num_heads, "Number of virtual crtcs to expose (default 4)"); +module_param_named(num_heads, qxl_num_crtc, int, 0400); + +static struct drm_driver qxl_driver; +static struct pci_driver qxl_pci_driver; + +static bool is_vga(struct pci_dev *pdev) +{ + return pdev->class == PCI_CLASS_DISPLAY_VGA << 8; +} + +static int +qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct qxl_device *qdev; + int ret; + + if (pdev->revision < 4) { + DRM_ERROR("qxl too old, doesn't support client_monitors_config," + " use xf86-video-qxl in user mode"); + return -EINVAL; /* TODO: ENODEV ? */ + } + + qdev = devm_drm_dev_alloc(&pdev->dev, &qxl_driver, + struct qxl_device, ddev); + if (IS_ERR(qdev)) { + pr_err("Unable to init drm dev"); + return -ENOMEM; + } + + ret = pci_enable_device(pdev); + if (ret) + return ret; + + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &qxl_driver); + if (ret) + goto disable_pci; + + if (is_vga(pdev) && pdev->revision < 5) { + ret = vga_get_interruptible(pdev, VGA_RSRC_LEGACY_IO); + if (ret) { + DRM_ERROR("can't get legacy vga ioports\n"); + goto disable_pci; + } + } + + ret = qxl_device_init(qdev, pdev); + if (ret) + goto put_vga; + + ret = qxl_modeset_init(qdev); + if (ret) + goto unload; + + drm_kms_helper_poll_init(&qdev->ddev); + + /* Complete initialization. 
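+ * drm_dev_register() makes the device visible to userspace, so it
+ * runs only after modesetting has been fully initialized.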
*/ + ret = drm_dev_register(&qdev->ddev, ent->driver_data); + if (ret) + goto modeset_cleanup; + + drm_fbdev_generic_setup(&qdev->ddev, 32); + return 0; + +modeset_cleanup: + qxl_modeset_fini(qdev); +unload: + qxl_device_fini(qdev); +put_vga: + if (is_vga(pdev) && pdev->revision < 5) + vga_put(pdev, VGA_RSRC_LEGACY_IO); +disable_pci: + pci_disable_device(pdev); + + return ret; +} + +static void qxl_drm_release(struct drm_device *dev) +{ + struct qxl_device *qdev = to_qxl(dev); + + /* + * TODO: qxl_device_fini() call should be in qxl_pci_remove(), + * reordering qxl_modeset_fini() + qxl_device_fini() calls is + * non-trivial though. + */ + qxl_modeset_fini(qdev); + qxl_device_fini(qdev); +} + +static void +qxl_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_dev_unregister(dev); + drm_atomic_helper_shutdown(dev); + if (is_vga(pdev) && pdev->revision < 5) + vga_put(pdev, VGA_RSRC_LEGACY_IO); +} + +DEFINE_DRM_GEM_FOPS(qxl_fops); + +static int qxl_drm_freeze(struct drm_device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + struct qxl_device *qdev = to_qxl(dev); + int ret; + + ret = drm_mode_config_helper_suspend(dev); + if (ret) + return ret; + + qxl_destroy_monitors_object(qdev); + qxl_surf_evict(qdev); + qxl_vram_evict(qdev); + + while (!qxl_check_idle(qdev->command_ring)); + while (!qxl_check_idle(qdev->release_ring)) + qxl_queue_garbage_collect(qdev, 1); + + pci_save_state(pdev); + + return 0; +} + +static int qxl_drm_resume(struct drm_device *dev, bool thaw) +{ + struct qxl_device *qdev = to_qxl(dev); + + qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; + if (!thaw) { + qxl_reinit_memslots(qdev); + } + + qxl_create_monitors_object(qdev); + return drm_mode_config_helper_resume(dev); +} + +static int qxl_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + int error; + + error = qxl_drm_freeze(drm_dev); + if (error) + return error; + + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + return 0; +} + +static int qxl_pm_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct qxl_device *qdev = to_qxl(drm_dev); + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + if (pci_enable_device(pdev)) { + return -EIO; + } + + qxl_io_reset(qdev); + return qxl_drm_resume(drm_dev, false); +} + +static int qxl_pm_thaw(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + + return qxl_drm_resume(drm_dev, true); +} + +static int qxl_pm_freeze(struct device *dev) +{ + struct drm_device *drm_dev = dev_get_drvdata(dev); + + return qxl_drm_freeze(drm_dev); +} + +static int qxl_pm_restore(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *drm_dev = pci_get_drvdata(pdev); + struct qxl_device *qdev = to_qxl(drm_dev); + + qxl_io_reset(qdev); + return qxl_drm_resume(drm_dev, false); +} + +static const struct dev_pm_ops qxl_pm_ops = { + .suspend = qxl_pm_suspend, + .resume = qxl_pm_resume, + .freeze = qxl_pm_freeze, + .thaw = qxl_pm_thaw, + .poweroff = qxl_pm_freeze, + .restore = qxl_pm_restore, +}; +static struct pci_driver qxl_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = qxl_pci_probe, + .remove = qxl_pci_remove, + .driver.pm = &qxl_pm_ops, +}; + +static const struct drm_ioctl_desc qxl_ioctls[] = { + DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_MAP, 
qxl_map_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, DRM_AUTH), +}; + +static struct drm_driver qxl_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + + .dumb_create = qxl_mode_dumb_create, + .dumb_map_offset = drm_gem_ttm_dumb_map_offset, +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = qxl_debugfs_init, +#endif + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, + .fops = &qxl_fops, + .ioctls = qxl_ioctls, + .num_ioctls = ARRAY_SIZE(qxl_ioctls), + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = 0, + .minor = 1, + .patchlevel = 0, + + .release = qxl_drm_release, +}; + +drm_module_pci_driver_if_modeset(qxl_pci_driver, qxl_modeset); + +MODULE_AUTHOR(DRIVER_AUTHOR); +MODULE_DESCRIPTION(DRIVER_DESC); +MODULE_LICENSE("GPL and additional rights"); diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h new file mode 100644 index 000000000..94753e017 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -0,0 +1,463 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alon Levy + */ + +#ifndef QXL_DRV_H +#define QXL_DRV_H + +/* + * Definitions taken from spice-protocol, plus kernel driver specific bits. 
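+ * The wire-format structures themselves are declared in qxl_dev.h.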
+ */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qxl_dev.h" + +struct iosys_map; + +#define DRIVER_AUTHOR "Dave Airlie" + +#define DRIVER_NAME "qxl" +#define DRIVER_DESC "RH QXL" +#define DRIVER_DATE "20120117" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 0 + +#define QXL_DEBUGFS_MAX_COMPONENTS 32 + +extern int qxl_num_crtc; + +#define QXL_INTERRUPT_MASK (\ + QXL_INTERRUPT_DISPLAY |\ + QXL_INTERRUPT_CURSOR |\ + QXL_INTERRUPT_IO_CMD |\ + QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) + +struct qxl_bo { + struct ttm_buffer_object tbo; + + /* Protected by gem.mutex */ + struct list_head list; + /* Protected by tbo.reserved */ + struct ttm_place placements[3]; + struct ttm_placement placement; + struct iosys_map map; + void *kptr; + unsigned int map_count; + int type; + + /* Constant after initialization */ + unsigned int is_primary:1; /* is this now a primary surface */ + unsigned int is_dumb:1; + struct qxl_bo *shadow; + unsigned int hw_surf_alloc:1; + struct qxl_surface surf; + uint32_t surface_id; + struct qxl_release *surf_create; +}; +#define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base) +#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo) + +struct qxl_gem { + struct mutex mutex; + struct list_head objects; +}; + +struct qxl_bo_list { + struct ttm_validate_buffer tv; +}; + +struct qxl_crtc { + struct drm_crtc base; + int index; + + struct qxl_bo *cursor_bo; +}; + +struct qxl_output { + int index; + struct drm_connector base; + struct drm_encoder enc; +}; + +#define to_qxl_crtc(x) container_of(x, struct qxl_crtc, base) +#define drm_connector_to_qxl_output(x) container_of(x, struct qxl_output, base) +#define drm_encoder_to_qxl_output(x) container_of(x, struct qxl_output, enc) + +struct qxl_mman { + struct ttm_device bdev; +}; + +struct qxl_memslot { + int index; + const char *name; + uint8_t generation; + uint64_t start_phys_addr; + uint64_t size; + uint64_t high_bits; +}; + +enum { + QXL_RELEASE_DRAWABLE, + QXL_RELEASE_SURFACE_CMD, + QXL_RELEASE_CURSOR_CMD, +}; + +/* drm_ prefix to differentiate from qxl_release_info in + * spice-protocol/qxl_dev.h */ +#define QXL_MAX_RES 96 +struct qxl_release { + struct dma_fence base; + + int id; + int type; + struct qxl_bo *release_bo; + uint32_t release_offset; + uint32_t surface_release_id; + struct ww_acquire_ctx ticket; + struct list_head bos; +}; + +struct qxl_drm_chunk { + struct list_head head; + struct qxl_bo *bo; +}; + +struct qxl_drm_image { + struct qxl_bo *bo; + struct list_head chunk_list; +}; + +/* + * Debugfs + */ +struct qxl_debugfs { + struct drm_info_list *files; + unsigned int num_files; +}; + +struct qxl_device { + struct drm_device ddev; + + resource_size_t vram_base, vram_size; + resource_size_t surfaceram_base, surfaceram_size; + resource_size_t rom_base, rom_size; + struct qxl_rom *rom; + + struct qxl_mode *modes; + struct qxl_bo *monitors_config_bo; + struct qxl_monitors_config *monitors_config; + + /* last received client_monitors_config */ + struct qxl_monitors_config *client_monitors_config; + + int io_base; + void *ram; + struct qxl_mman mman; + struct qxl_gem gem; + + void *ram_physical; + + struct qxl_ring *release_ring; + struct qxl_ring *command_ring; + struct qxl_ring *cursor_ring; + + struct qxl_ram_header *ram_header; + + struct qxl_bo *primary_bo; + struct qxl_bo *dumb_shadow_bo; + struct qxl_head *dumb_heads; + + struct 
qxl_memslot main_slot; + struct qxl_memslot surfaces_slot; + + spinlock_t release_lock; + struct idr release_idr; + uint32_t release_seqno; + atomic_t release_count; + wait_queue_head_t release_event; + spinlock_t release_idr_lock; + struct mutex async_io_mutex; + unsigned int last_sent_io_cmd; + + /* interrupt handling */ + atomic_t irq_received; + atomic_t irq_received_display; + atomic_t irq_received_cursor; + atomic_t irq_received_io_cmd; + unsigned int irq_received_error; + wait_queue_head_t display_event; + wait_queue_head_t cursor_event; + wait_queue_head_t io_cmd_event; + struct work_struct client_monitors_config_work; + + /* debugfs */ + struct qxl_debugfs debugfs[QXL_DEBUGFS_MAX_COMPONENTS]; + unsigned int debugfs_count; + + struct mutex update_area_mutex; + + struct idr surf_id_idr; + spinlock_t surf_id_idr_lock; + int last_alloced_surf_id; + + struct mutex surf_evict_mutex; + struct io_mapping *vram_mapping; + struct io_mapping *surface_mapping; + + /* */ + struct mutex release_mutex; + struct qxl_bo *current_release_bo[3]; + int current_release_bo_offset[3]; + + struct work_struct gc_work; + + struct drm_property *hotplug_mode_update_property; + int monitors_config_width; + int monitors_config_height; +}; + +#define to_qxl(dev) container_of(dev, struct qxl_device, ddev) + +int qxl_debugfs_fence_init(struct qxl_device *rdev); + +int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev); +void qxl_device_fini(struct qxl_device *qdev); + +int qxl_modeset_init(struct qxl_device *qdev); +void qxl_modeset_fini(struct qxl_device *qdev); + +int qxl_bo_init(struct qxl_device *qdev); +void qxl_bo_fini(struct qxl_device *qdev); + +void qxl_reinit_memslots(struct qxl_device *qdev); +int qxl_surf_evict(struct qxl_device *qdev); +int qxl_vram_evict(struct qxl_device *qdev); + +struct qxl_ring *qxl_ring_create(struct qxl_ring_header *header, + int element_size, + int n_elements, + int prod_notify, + wait_queue_head_t *push_event); +void qxl_ring_free(struct qxl_ring *ring); +int qxl_check_idle(struct qxl_ring *ring); + +static inline uint64_t +qxl_bo_physical_address(struct qxl_device *qdev, struct qxl_bo *bo, + unsigned long offset) +{ + struct qxl_memslot *slot = + (bo->tbo.resource->mem_type == TTM_PL_VRAM) + ? 
&qdev->main_slot : &qdev->surfaces_slot; + + /* TODO - need to hold one of the locks to read bo->tbo.resource->start */ + + return slot->high_bits | ((bo->tbo.resource->start << PAGE_SHIFT) + offset); +} + +/* qxl_display.c */ +void qxl_display_read_client_monitors_config(struct qxl_device *qdev); +int qxl_create_monitors_object(struct qxl_device *qdev); +int qxl_destroy_monitors_object(struct qxl_device *qdev); + +/* qxl_gem.c */ +void qxl_gem_init(struct qxl_device *qdev); +void qxl_gem_fini(struct qxl_device *qdev); +int qxl_gem_object_create(struct qxl_device *qdev, int size, + int alignment, int initial_domain, + bool discardable, bool kernel, + struct qxl_surface *surf, + struct drm_gem_object **obj); +int qxl_gem_object_create_with_handle(struct qxl_device *qdev, + struct drm_file *file_priv, + u32 domain, + size_t size, + struct qxl_surface *surf, + struct drm_gem_object **gobj, + uint32_t *handle); +void qxl_gem_object_free(struct drm_gem_object *gobj); +int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); +void qxl_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv); +void qxl_bo_force_delete(struct qxl_device *qdev); + +/* qxl_dumb.c */ +int qxl_mode_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +/* qxl ttm */ +int qxl_ttm_init(struct qxl_device *qdev); +void qxl_ttm_fini(struct qxl_device *qdev); +int qxl_ttm_io_mem_reserve(struct ttm_device *bdev, + struct ttm_resource *mem); + +/* qxl image */ + +int qxl_image_init(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image *dimage, + const uint8_t *data, + int x, int y, int width, int height, + int depth, int stride); +int +qxl_image_alloc_objects(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image **image_ptr, + int height, int stride); +void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage); + +void qxl_update_screen(struct qxl_device *qxl); + +/* qxl io operations (qxl_cmd.c) */ + +void qxl_io_create_primary(struct qxl_device *qdev, + struct qxl_bo *bo); +void qxl_io_destroy_primary(struct qxl_device *qdev); +void qxl_io_memslot_add(struct qxl_device *qdev, uint8_t id); +void qxl_io_notify_oom(struct qxl_device *qdev); + +int qxl_io_update_area(struct qxl_device *qdev, struct qxl_bo *surf, + const struct qxl_rect *area); + +void qxl_io_reset(struct qxl_device *qdev); +void qxl_io_monitors_config(struct qxl_device *qdev); +int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible); +void qxl_io_flush_release(struct qxl_device *qdev); +void qxl_io_flush_surfaces(struct qxl_device *qdev); + +union qxl_release_info *qxl_release_map(struct qxl_device *qdev, + struct qxl_release *release); +void qxl_release_unmap(struct qxl_device *qdev, + struct qxl_release *release, + union qxl_release_info *info); +int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo); +int qxl_release_reserve_list(struct qxl_release *release, bool no_intr); +void qxl_release_backoff_reserve_list(struct qxl_release *release); +void qxl_release_fence_buffer_objects(struct qxl_release *release); + +int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, + enum qxl_surface_cmd_type surface_cmd_type, + struct qxl_release *create_rel, + struct qxl_release **release); +int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, + int type, struct qxl_release **release, + struct qxl_bo **rbo); + +int 
+qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, + uint32_t type, bool interruptible); +int +qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, + uint32_t type, bool interruptible); +int qxl_alloc_bo_reserved(struct qxl_device *qdev, + struct qxl_release *release, + unsigned long size, + struct qxl_bo **_bo); +/* qxl drawing commands */ + +void qxl_draw_dirty_fb(struct qxl_device *qdev, + struct drm_framebuffer *fb, + struct qxl_bo *bo, + unsigned int flags, unsigned int color, + struct drm_clip_rect *clips, + unsigned int num_clips, int inc, + uint32_t dumb_shadow_offset); + +void qxl_release_free(struct qxl_device *qdev, + struct qxl_release *release); + +/* used by qxl_debugfs_release */ +struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, + uint64_t id); + +bool qxl_queue_garbage_collect(struct qxl_device *qdev, bool flush); +int qxl_garbage_collect(struct qxl_device *qdev); + +/* debugfs */ + +void qxl_debugfs_init(struct drm_minor *minor); +void qxl_ttm_debugfs_init(struct qxl_device *qdev); + +/* qxl_prime.c */ +int qxl_gem_prime_pin(struct drm_gem_object *obj); +void qxl_gem_prime_unpin(struct drm_gem_object *obj); +struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *qxl_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *sgt); +int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void qxl_gem_prime_vunmap(struct drm_gem_object *obj, + struct iosys_map *map); + +/* qxl_irq.c */ +int qxl_irq_init(struct qxl_device *qdev); + +void qxl_debugfs_add_files(struct qxl_device *qdev, + struct drm_info_list *files, + unsigned int nfiles); + +int qxl_surface_id_alloc(struct qxl_device *qdev, + struct qxl_bo *surf); +void qxl_surface_id_dealloc(struct qxl_device *qdev, + uint32_t surface_id); +int qxl_hw_surface_alloc(struct qxl_device *qdev, + struct qxl_bo *surf); +int qxl_hw_surface_dealloc(struct qxl_device *qdev, + struct qxl_bo *surf); + +int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo); + +struct qxl_drv_surface * +qxl_surface_lookup(struct drm_device *dev, int surface_id); +void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing); + +/* qxl_ioctl.c */ +int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file); + +#endif diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c new file mode 100644 index 000000000..17df5c7cc --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -0,0 +1,76 @@ +/* + * Copyright 2013 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alon Levy + */ + +#include "qxl_drv.h" +#include "qxl_object.h" + +/* dumb ioctls implementation */ + +int qxl_mode_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct qxl_device *qdev = to_qxl(dev); + struct qxl_bo *qobj; + struct drm_gem_object *gobj; + uint32_t handle; + int r; + struct qxl_surface surf; + uint32_t pitch, format; + + pitch = args->width * ((args->bpp + 1) / 8); + args->size = pitch * args->height; + args->size = ALIGN(args->size, PAGE_SIZE); + + switch (args->bpp) { + case 16: + format = SPICE_SURFACE_FMT_16_565; + break; + case 32: + format = SPICE_SURFACE_FMT_32_xRGB; + break; + default: + return -EINVAL; + } + + surf.width = args->width; + surf.height = args->height; + surf.stride = pitch; + surf.format = format; + surf.data = 0; + + r = qxl_gem_object_create_with_handle(qdev, file_priv, + QXL_GEM_DOMAIN_CPU, + args->size, &surf, &gobj, + &handle); + if (r) + return r; + qobj = gem_to_qxl_bo(gobj); + qobj->is_dumb = true; + drm_gem_object_put(gobj); + args->pitch = pitch; + args->handle = handle; + return 0; +} diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c new file mode 100644 index 000000000..fc5e3763c --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -0,0 +1,131 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alon Levy
+ */
+
+#include
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+void qxl_gem_object_free(struct drm_gem_object *gobj)
+{
+ struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
+ struct qxl_device *qdev;
+ struct ttm_buffer_object *tbo;
+
+ qdev = to_qxl(gobj->dev);
+
+ qxl_surface_evict(qdev, qobj, false);
+
+ tbo = &qobj->tbo;
+ ttm_bo_put(tbo);
+}
+
+int qxl_gem_object_create(struct qxl_device *qdev, int size,
+ int alignment, int initial_domain,
+ bool discardable, bool kernel,
+ struct qxl_surface *surf,
+ struct drm_gem_object **obj)
+{
+ struct qxl_bo *qbo;
+ int r;
+
+ *obj = NULL;
+ /* At least align on page size */
+ if (alignment < PAGE_SIZE)
+ alignment = PAGE_SIZE;
+ r = qxl_bo_create(qdev, size, kernel, false, initial_domain, 0, surf, &qbo);
+ if (r) {
+ if (r != -ERESTARTSYS)
+ DRM_ERROR(
+ "Failed to allocate GEM object (%d, %d, %u, %d)\n",
+ size, initial_domain, alignment, r);
+ return r;
+ }
+ *obj = &qbo->tbo.base;
+
+ mutex_lock(&qdev->gem.mutex);
+ list_add_tail(&qbo->list, &qdev->gem.objects);
+ mutex_unlock(&qdev->gem.mutex);
+
+ return 0;
+}
+
+/*
+ * If the caller passed a valid gobj pointer, it is responsible for calling
+ * drm_gem_object_put() when it no longer needs to access the object.
+ *
+ * If gobj is NULL, it is handled internally.
+ */
+int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
+ struct drm_file *file_priv,
+ u32 domain,
+ size_t size,
+ struct qxl_surface *surf,
+ struct drm_gem_object **gobj,
+ uint32_t *handle)
+{
+ int r;
+ struct drm_gem_object *local_gobj;
+
+ BUG_ON(!handle);
+
+ r = qxl_gem_object_create(qdev, size, 0,
+ domain,
+ false, false, surf,
+ &local_gobj);
+ if (r)
+ return -ENOMEM;
+ r = drm_gem_handle_create(file_priv, local_gobj, handle);
+ if (r)
+ return r;
+
+ if (gobj)
+ *gobj = local_gobj;
+ else
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_put(local_gobj);
+
+ return 0;
+}
+
+int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv)
+{
+ return 0;
+}
+
+void qxl_gem_object_close(struct drm_gem_object *obj,
+ struct drm_file *file_priv)
+{
+}
+
+void qxl_gem_init(struct qxl_device *qdev)
+{
+ INIT_LIST_HEAD(&qdev->gem.objects);
+}
+
+void qxl_gem_fini(struct qxl_device *qdev)
+{
+ qxl_bo_force_delete(qdev);
+}
diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
new file mode 100644
index 000000000..ffff54e5f
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_image.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alon Levy + */ + +#include +#include + +#include "qxl_drv.h" +#include "qxl_object.h" + +static int +qxl_allocate_chunk(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image *image, + unsigned int chunk_size) +{ + struct qxl_drm_chunk *chunk; + int ret; + + chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL); + if (!chunk) + return -ENOMEM; + + ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); + if (ret) { + kfree(chunk); + return ret; + } + + list_add_tail(&chunk->head, &image->chunk_list); + return 0; +} + +int +qxl_image_alloc_objects(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image **image_ptr, + int height, int stride) +{ + struct qxl_drm_image *image; + int ret; + + image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL); + if (!image) + return -ENOMEM; + + INIT_LIST_HEAD(&image->chunk_list); + + ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); + if (ret) { + kfree(image); + return ret; + } + + ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); + if (ret) { + qxl_bo_unref(&image->bo); + kfree(image); + return ret; + } + *image_ptr = image; + return 0; +} + +void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) +{ + struct qxl_drm_chunk *chunk, *tmp; + + list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) { + qxl_bo_unref(&chunk->bo); + kfree(chunk); + } + + qxl_bo_unref(&dimage->bo); + kfree(dimage); +} + +static int +qxl_image_init_helper(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image *dimage, + const uint8_t *data, + int width, int height, + int depth, unsigned int hash, + int stride) +{ + struct qxl_drm_chunk *drv_chunk; + struct qxl_image *image; + struct qxl_data_chunk *chunk; + int i; + int chunk_stride; + int linesize = width * depth / 8; + struct qxl_bo *chunk_bo, *image_bo; + void *ptr; + /* Chunk */ + /* FIXME: Check integer overflow */ + /* TODO: variable number of chunks */ + + drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head); + + chunk_bo = drv_chunk->bo; + chunk_stride = stride; /* TODO: should use linesize, but it renders + wrong (check the bitmaps are sent correctly + first) */ + + ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); + chunk = ptr; + chunk->data_size = height * chunk_stride; + chunk->prev_chunk = 0; + chunk->next_chunk = 0; + qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr); + + { + void *k_data, *i_data; + int remain; + int page; + int size; + + if (stride == linesize && chunk_stride == stride) { + remain = linesize * height; + page = 0; + i_data = (void *)data; + + while (remain > 0) { + ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page << PAGE_SHIFT); + + if (page == 0) { + chunk = ptr; + k_data = chunk->data; + size = PAGE_SIZE - offsetof(struct qxl_data_chunk, data); + } else { + k_data = ptr; + size = PAGE_SIZE; + } + size = min(size, remain); + + memcpy(k_data, i_data, 
size); + + qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr); + i_data += size; + remain -= size; + page++; + } + } else { + unsigned int page_base, page_offset, out_offset; + + for (i = 0 ; i < height ; ++i) { + i_data = (void *)data + i * stride; + remain = linesize; + out_offset = offsetof(struct qxl_data_chunk, data) + i * chunk_stride; + + while (remain > 0) { + page_base = out_offset & PAGE_MASK; + page_offset = offset_in_page(out_offset); + size = min((int)(PAGE_SIZE - page_offset), remain); + + ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); + k_data = ptr + page_offset; + memcpy(k_data, i_data, size); + qxl_bo_kunmap_atomic_page(qdev, chunk_bo, ptr); + remain -= size; + i_data += size; + out_offset += size; + } + } + } + } + qxl_bo_vunmap_locked(chunk_bo); + + image_bo = dimage->bo; + ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); + image = ptr; + + image->descriptor.id = 0; + image->descriptor.type = SPICE_IMAGE_TYPE_BITMAP; + + image->descriptor.flags = 0; + image->descriptor.width = width; + image->descriptor.height = height; + + switch (depth) { + case 1: + /* TODO: BE? check by arch? */ + image->u.bitmap.format = SPICE_BITMAP_FMT_1BIT_BE; + break; + case 24: + image->u.bitmap.format = SPICE_BITMAP_FMT_24BIT; + break; + case 32: + image->u.bitmap.format = SPICE_BITMAP_FMT_32BIT; + break; + default: + DRM_ERROR("unsupported image bit depth\n"); + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); + return -EINVAL; + } + image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN; + image->u.bitmap.x = width; + image->u.bitmap.y = height; + image->u.bitmap.stride = chunk_stride; + image->u.bitmap.palette = 0; + image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); + + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); + + return 0; +} + +int qxl_image_init(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image *dimage, + const uint8_t *data, + int x, int y, int width, int height, + int depth, int stride) +{ + data += y * stride + x * (depth / 8); + return qxl_image_init_helper(qdev, release, dimage, data, + width, height, depth, 0, stride); +} diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c new file mode 100644 index 000000000..dd0f834d8 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -0,0 +1,415 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alon Levy + */ + +#include +#include + +#include "qxl_drv.h" +#include "qxl_object.h" + +/* + * TODO: allocating a new gem(in qxl_bo) for each request. + * This is wasteful since bo's are page aligned. + */ +int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct qxl_device *qdev = to_qxl(dev); + struct drm_qxl_alloc *qxl_alloc = data; + int ret; + uint32_t handle; + u32 domain = QXL_GEM_DOMAIN_VRAM; + + if (qxl_alloc->size == 0) { + DRM_ERROR("invalid size %d\n", qxl_alloc->size); + return -EINVAL; + } + ret = qxl_gem_object_create_with_handle(qdev, file_priv, + domain, + qxl_alloc->size, + NULL, + NULL, &handle); + if (ret) { + DRM_ERROR("%s: failed to create gem ret=%d\n", + __func__, ret); + return -ENOMEM; + } + qxl_alloc->handle = handle; + return 0; +} + +int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct qxl_device *qdev = to_qxl(dev); + struct drm_qxl_map *qxl_map = data; + + return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle, + &qxl_map->offset); +} + +struct qxl_reloc_info { + int type; + struct qxl_bo *dst_bo; + uint32_t dst_offset; + struct qxl_bo *src_bo; + int src_offset; +}; + +/* + * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's + * are on vram). + * *(dst + dst_off) = qxl_bo_physical_address(src, src_off) + */ +static void +apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) +{ + void *reloc_page; + + reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); + *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, + info->src_bo, + info->src_offset); + qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); +} + +static void +apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) +{ + uint32_t id = 0; + void *reloc_page; + + if (info->src_bo && !info->src_bo->is_primary) + id = info->src_bo->surface_id; + + reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); + *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id; + qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); +} + +/* return holding the reference to this object */ +static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle, + struct qxl_release *release, struct qxl_bo **qbo_p) +{ + struct drm_gem_object *gobj; + struct qxl_bo *qobj; + int ret; + + gobj = drm_gem_object_lookup(file_priv, handle); + if (!gobj) + return -EINVAL; + + qobj = gem_to_qxl_bo(gobj); + + ret = qxl_release_list_add(release, qobj); + drm_gem_object_put(gobj); + if (ret) + return ret; + + *qbo_p = qobj; + return 0; +} + +/* + * Usage of execbuffer: + * Relocations need to take into account the full QXLDrawable size. 
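+ * (In the code below, the kernel copies the user command into the release
+ * BO just past the QXLReleaseInfo header, which is why relocations whose
+ * dst_handle is zero get release_offset added to their destination offset.)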
+ * However, the command as passed from user space must *not* contain the initial + * QXLReleaseInfo struct (first XXX bytes) + */ +static int qxl_process_single_command(struct qxl_device *qdev, + struct drm_qxl_command *cmd, + struct drm_file *file_priv) +{ + struct qxl_reloc_info *reloc_info; + int release_type; + struct qxl_release *release; + struct qxl_bo *cmd_bo; + void *fb_cmd; + int i, ret, num_relocs; + int unwritten; + + switch (cmd->type) { + case QXL_CMD_DRAW: + release_type = QXL_RELEASE_DRAWABLE; + break; + case QXL_CMD_SURFACE: + case QXL_CMD_CURSOR: + default: + DRM_DEBUG("Only draw commands in execbuffers\n"); + return -EINVAL; + } + + if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) + return -EINVAL; + + if (!access_ok(u64_to_user_ptr(cmd->command), + cmd->command_size)) + return -EFAULT; + + reloc_info = kmalloc_array(cmd->relocs_num, + sizeof(struct qxl_reloc_info), GFP_KERNEL); + if (!reloc_info) + return -ENOMEM; + + ret = qxl_alloc_release_reserved(qdev, + sizeof(union qxl_release_info) + + cmd->command_size, + release_type, + &release, + &cmd_bo); + if (ret) + goto out_free_reloc; + + /* TODO copy slow path code from i915 */ + fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_MASK)); + unwritten = __copy_from_user_inatomic_nocache + (fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_MASK), + u64_to_user_ptr(cmd->command), cmd->command_size); + + { + struct qxl_drawable *draw = fb_cmd; + + draw->mm_time = qdev->rom->mm_clock; + } + + qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); + if (unwritten) { + DRM_ERROR("got unwritten %d\n", unwritten); + ret = -EFAULT; + goto out_free_release; + } + + /* fill out reloc info structs */ + num_relocs = 0; + for (i = 0; i < cmd->relocs_num; ++i) { + struct drm_qxl_reloc reloc; + struct drm_qxl_reloc __user *u = u64_to_user_ptr(cmd->relocs); + + if (copy_from_user(&reloc, u + i, sizeof(reloc))) { + ret = -EFAULT; + goto out_free_bos; + } + + /* add the bos to the list of bos to validate - + need to validate first then process relocs? 
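+		   (a BO reloc patches in a 64-bit physical address, a SURF
+		   reloc patches in a 32-bit surface id - see apply_reloc()
+		   and apply_surf_reloc() above)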
*/ + if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) { + DRM_DEBUG("unknown reloc type %d\n", reloc.reloc_type); + + ret = -EINVAL; + goto out_free_bos; + } + reloc_info[i].type = reloc.reloc_type; + + if (reloc.dst_handle) { + ret = qxlhw_handle_to_bo(file_priv, reloc.dst_handle, release, + &reloc_info[i].dst_bo); + if (ret) + goto out_free_bos; + reloc_info[i].dst_offset = reloc.dst_offset; + } else { + reloc_info[i].dst_bo = cmd_bo; + reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset; + } + num_relocs++; + + /* reserve and validate the reloc dst bo */ + if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle) { + ret = qxlhw_handle_to_bo(file_priv, reloc.src_handle, release, + &reloc_info[i].src_bo); + if (ret) + goto out_free_bos; + reloc_info[i].src_offset = reloc.src_offset; + } else { + reloc_info[i].src_bo = NULL; + reloc_info[i].src_offset = 0; + } + } + + /* validate all buffers */ + ret = qxl_release_reserve_list(release, false); + if (ret) + goto out_free_bos; + + for (i = 0; i < cmd->relocs_num; ++i) { + if (reloc_info[i].type == QXL_RELOC_TYPE_BO) + apply_reloc(qdev, &reloc_info[i]); + else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF) + apply_surf_reloc(qdev, &reloc_info[i]); + } + + qxl_release_fence_buffer_objects(release); + ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); + +out_free_bos: +out_free_release: + if (ret) + qxl_release_free(qdev, release); +out_free_reloc: + kfree(reloc_info); + return ret; +} + +int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct qxl_device *qdev = to_qxl(dev); + struct drm_qxl_execbuffer *execbuffer = data; + struct drm_qxl_command user_cmd; + int cmd_num; + int ret; + + for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { + + struct drm_qxl_command __user *commands = + u64_to_user_ptr(execbuffer->commands); + + if (copy_from_user(&user_cmd, commands + cmd_num, + sizeof(user_cmd))) + return -EFAULT; + + ret = qxl_process_single_command(qdev, &user_cmd, file_priv); + if (ret) + return ret; + } + return 0; +} + +int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct qxl_device *qdev = to_qxl(dev); + struct drm_qxl_update_area *update_area = data; + struct qxl_rect area = {.left = update_area->left, + .top = update_area->top, + .right = update_area->right, + .bottom = update_area->bottom}; + int ret; + struct drm_gem_object *gobj = NULL; + struct qxl_bo *qobj = NULL; + struct ttm_operation_ctx ctx = { true, false }; + + if (update_area->left >= update_area->right || + update_area->top >= update_area->bottom) + return -EINVAL; + + gobj = drm_gem_object_lookup(file, update_area->handle); + if (gobj == NULL) + return -ENOENT; + + qobj = gem_to_qxl_bo(gobj); + + ret = qxl_bo_reserve(qobj); + if (ret) + goto out; + + if (!qobj->tbo.pin_count) { + qxl_ttm_placement_from_domain(qobj, qobj->type); + ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, &ctx); + if (unlikely(ret)) + goto out; + } + + ret = qxl_bo_check_id(qdev, qobj); + if (ret) + goto out2; + if (!qobj->surface_id) + DRM_ERROR("got update area for surface with no id %d\n", update_area->handle); + ret = qxl_io_update_area(qdev, qobj, &area); + +out2: + qxl_bo_unreserve(qobj); + +out: + drm_gem_object_put(gobj); + return ret; +} + +int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct qxl_device *qdev = to_qxl(dev); + struct drm_qxl_getparam *param = data; 
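+	/* Two read-only parameters are exposed: the number of surfaces the
+	 * device supports and QXL_MAX_RES as the relocation limit. */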
+ + switch (param->param) { + case QXL_PARAM_NUM_SURFACES: + param->value = qdev->rom->n_surfaces; + break; + case QXL_PARAM_MAX_RELOCS: + param->value = QXL_MAX_RES; + break; + default: + return -EINVAL; + } + return 0; +} + +int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) +{ + struct qxl_device *qdev = to_qxl(dev); + struct pci_dev *pdev = to_pci_dev(dev->dev); + struct drm_qxl_clientcap *param = data; + int byte, idx; + + byte = param->index / 8; + idx = param->index % 8; + + if (pdev->revision < 4) + return -ENOSYS; + + if (byte >= 58) + return -ENOSYS; + + if (qdev->rom->client_capabilities[byte] & (1 << idx)) + return 0; + return -ENOSYS; +} + +int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct qxl_device *qdev = to_qxl(dev); + struct drm_qxl_alloc_surf *param = data; + int handle; + int ret; + int size, actual_stride; + struct qxl_surface surf; + + /* work out size allocate bo with handle */ + actual_stride = param->stride < 0 ? -param->stride : param->stride; + size = actual_stride * param->height + actual_stride; + + surf.format = param->format; + surf.width = param->width; + surf.height = param->height; + surf.stride = param->stride; + surf.data = 0; + + ret = qxl_gem_object_create_with_handle(qdev, file, + QXL_GEM_DOMAIN_SURFACE, + size, + &surf, + NULL, &handle); + if (ret) { + DRM_ERROR("%s: failed to create gem ret=%d\n", + __func__, ret); + return -ENOMEM; + } else + param->handle = handle; + return ret; +} diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c new file mode 100644 index 000000000..665278ee3 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_irq.c @@ -0,0 +1,106 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alon Levy + */ + +#include + +#include + +#include "qxl_drv.h" + +static irqreturn_t qxl_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *) arg; + struct qxl_device *qdev = to_qxl(dev); + uint32_t pending; + + pending = xchg(&qdev->ram_header->int_pending, 0); + + if (!pending) + return IRQ_NONE; + + atomic_inc(&qdev->irq_received); + + if (pending & QXL_INTERRUPT_DISPLAY) { + atomic_inc(&qdev->irq_received_display); + wake_up_all(&qdev->display_event); + qxl_queue_garbage_collect(qdev, false); + } + if (pending & QXL_INTERRUPT_CURSOR) { + atomic_inc(&qdev->irq_received_cursor); + wake_up_all(&qdev->cursor_event); + } + if (pending & QXL_INTERRUPT_IO_CMD) { + atomic_inc(&qdev->irq_received_io_cmd); + wake_up_all(&qdev->io_cmd_event); + } + if (pending & QXL_INTERRUPT_ERROR) { + /* TODO: log it, reset device (only way to exit this condition) + * (do it a certain number of times, afterwards admit defeat, + * to avoid endless loops). + */ + qdev->irq_received_error++; + DRM_WARN("driver is in bug mode\n"); + } + if (pending & QXL_INTERRUPT_CLIENT_MONITORS_CONFIG) { + schedule_work(&qdev->client_monitors_config_work); + } + qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; + outb(0, qdev->io_base + QXL_IO_UPDATE_IRQ); + return IRQ_HANDLED; +} + +static void qxl_client_monitors_config_work_func(struct work_struct *work) +{ + struct qxl_device *qdev = container_of(work, struct qxl_device, + client_monitors_config_work); + + qxl_display_read_client_monitors_config(qdev); +} + +int qxl_irq_init(struct qxl_device *qdev) +{ + struct drm_device *ddev = &qdev->ddev; + struct pci_dev *pdev = to_pci_dev(ddev->dev); + int ret; + + init_waitqueue_head(&qdev->display_event); + init_waitqueue_head(&qdev->cursor_event); + init_waitqueue_head(&qdev->io_cmd_event); + init_waitqueue_head(&qdev->release_event); + INIT_WORK(&qdev->client_monitors_config_work, + qxl_client_monitors_config_work_func); + atomic_set(&qdev->irq_received, 0); + atomic_set(&qdev->irq_received_display, 0); + atomic_set(&qdev->irq_received_cursor, 0); + atomic_set(&qdev->irq_received_io_cmd, 0); + qdev->irq_received_error = 0; + ret = request_irq(pdev->irq, qxl_irq_handler, IRQF_SHARED, ddev->driver->name, ddev); + qdev->ram_header->int_mask = QXL_INTERRUPT_MASK; + if (unlikely(ret != 0)) { + DRM_ERROR("Failed installing irq: %d\n", ret); + return 1; + } + return 0; +} diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c new file mode 100644 index 000000000..dc3828db1 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_kms.c @@ -0,0 +1,322 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alon Levy + */ + +#include +#include + +#include +#include +#include + +#include "qxl_drv.h" +#include "qxl_object.h" + +static bool qxl_check_device(struct qxl_device *qdev) +{ + struct qxl_rom *rom = qdev->rom; + + if (rom->magic != 0x4f525851) { + DRM_ERROR("bad rom signature %x\n", rom->magic); + return false; + } + + DRM_INFO("Device Version %d.%d\n", rom->id, rom->update_id); + DRM_INFO("Compression level %d log level %d\n", rom->compression_level, + rom->log_level); + DRM_INFO("%d io pages at offset 0x%x\n", + rom->num_io_pages, rom->pages_offset); + DRM_INFO("%d byte draw area at offset 0x%x\n", + rom->surface0_area_size, rom->draw_area_offset); + + qdev->vram_size = rom->surface0_area_size; + DRM_INFO("RAM header offset: 0x%x\n", rom->ram_header_offset); + return true; +} + +static void setup_hw_slot(struct qxl_device *qdev, struct qxl_memslot *slot) +{ + qdev->ram_header->mem_slot.mem_start = slot->start_phys_addr; + qdev->ram_header->mem_slot.mem_end = slot->start_phys_addr + slot->size; + qxl_io_memslot_add(qdev, qdev->rom->slots_start + slot->index); +} + +static void setup_slot(struct qxl_device *qdev, + struct qxl_memslot *slot, + unsigned int slot_index, + const char *slot_name, + unsigned long start_phys_addr, + unsigned long size) +{ + uint64_t high_bits; + + slot->index = slot_index; + slot->name = slot_name; + slot->start_phys_addr = start_phys_addr; + slot->size = size; + + setup_hw_slot(qdev, slot); + + slot->generation = qdev->rom->slot_generation; + high_bits = (qdev->rom->slots_start + slot->index) + << qdev->rom->slot_gen_bits; + high_bits |= slot->generation; + high_bits <<= (64 - (qdev->rom->slot_gen_bits + qdev->rom->slot_id_bits)); + slot->high_bits = high_bits; + + DRM_INFO("slot %d (%s): base 0x%08lx, size 0x%08lx\n", + slot->index, slot->name, + (unsigned long)slot->start_phys_addr, + (unsigned long)slot->size); +} + +void qxl_reinit_memslots(struct qxl_device *qdev) +{ + setup_hw_slot(qdev, &qdev->main_slot); + setup_hw_slot(qdev, &qdev->surfaces_slot); +} + +static void qxl_gc_work(struct work_struct *work) +{ + struct qxl_device *qdev = container_of(work, struct qxl_device, gc_work); + + qxl_garbage_collect(qdev); +} + +int qxl_device_init(struct qxl_device *qdev, + struct pci_dev *pdev) +{ + int r, sb; + + pci_set_drvdata(pdev, &qdev->ddev); + + mutex_init(&qdev->gem.mutex); + mutex_init(&qdev->update_area_mutex); + mutex_init(&qdev->release_mutex); + mutex_init(&qdev->surf_evict_mutex); + qxl_gem_init(qdev); + + qdev->rom_base = pci_resource_start(pdev, 2); + qdev->rom_size = pci_resource_len(pdev, 2); + qdev->vram_base = pci_resource_start(pdev, 0); + qdev->io_base = pci_resource_start(pdev, 3); + + qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0)); + if (!qdev->vram_mapping) { + pr_err("Unable to create vram_mapping"); + return -ENOMEM; + } + + if (pci_resource_len(pdev, 4) > 0) { + /* 64bit surface bar present */ + sb = 4; + qdev->surfaceram_base = pci_resource_start(pdev, sb); + qdev->surfaceram_size = pci_resource_len(pdev, sb); + qdev->surface_mapping = + io_mapping_create_wc(qdev->surfaceram_base, + qdev->surfaceram_size); + } + if (qdev->surface_mapping == NULL) { + /* 64bit surface bar not present (or 
mapping failed) */ + sb = 1; + qdev->surfaceram_base = pci_resource_start(pdev, sb); + qdev->surfaceram_size = pci_resource_len(pdev, sb); + qdev->surface_mapping = + io_mapping_create_wc(qdev->surfaceram_base, + qdev->surfaceram_size); + if (!qdev->surface_mapping) { + pr_err("Unable to create surface_mapping"); + r = -ENOMEM; + goto vram_mapping_free; + } + } + + DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n", + (unsigned long long)qdev->vram_base, + (unsigned long long)pci_resource_end(pdev, 0), + (int)pci_resource_len(pdev, 0) / 1024 / 1024, + (int)pci_resource_len(pdev, 0) / 1024, + (unsigned long long)qdev->surfaceram_base, + (unsigned long long)pci_resource_end(pdev, sb), + (int)qdev->surfaceram_size / 1024 / 1024, + (int)qdev->surfaceram_size / 1024, + (sb == 4) ? "64bit" : "32bit"); + + qdev->rom = ioremap_wc(qdev->rom_base, qdev->rom_size); + if (!qdev->rom) { + pr_err("Unable to ioremap ROM\n"); + r = -ENOMEM; + goto surface_mapping_free; + } + + if (!qxl_check_device(qdev)) { + r = -ENODEV; + goto rom_unmap; + } + + r = qxl_bo_init(qdev); + if (r) { + DRM_ERROR("bo init failed %d\n", r); + goto rom_unmap; + } + + qdev->ram_header = ioremap_wc(qdev->vram_base + + qdev->rom->ram_header_offset, + sizeof(*qdev->ram_header)); + if (!qdev->ram_header) { + DRM_ERROR("Unable to ioremap RAM header\n"); + r = -ENOMEM; + goto bo_fini; + } + + qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr), + sizeof(struct qxl_command), + QXL_COMMAND_RING_SIZE, + qdev->io_base + QXL_IO_NOTIFY_CMD, + &qdev->display_event); + if (!qdev->command_ring) { + DRM_ERROR("Unable to create command ring\n"); + r = -ENOMEM; + goto ram_header_unmap; + } + + qdev->cursor_ring = qxl_ring_create( + &(qdev->ram_header->cursor_ring_hdr), + sizeof(struct qxl_command), + QXL_CURSOR_RING_SIZE, + qdev->io_base + QXL_IO_NOTIFY_CURSOR, + &qdev->cursor_event); + + if (!qdev->cursor_ring) { + DRM_ERROR("Unable to create cursor ring\n"); + r = -ENOMEM; + goto command_ring_free; + } + + qdev->release_ring = qxl_ring_create( + &(qdev->ram_header->release_ring_hdr), + sizeof(uint64_t), + QXL_RELEASE_RING_SIZE, 0, + NULL); + + if (!qdev->release_ring) { + DRM_ERROR("Unable to create release ring\n"); + r = -ENOMEM; + goto cursor_ring_free; + } + + idr_init_base(&qdev->release_idr, 1); + spin_lock_init(&qdev->release_idr_lock); + spin_lock_init(&qdev->release_lock); + + idr_init_base(&qdev->surf_id_idr, 1); + spin_lock_init(&qdev->surf_id_idr_lock); + + mutex_init(&qdev->async_io_mutex); + + /* reset the device into a known state - no memslots, no primary + * created, no surfaces. */ + qxl_io_reset(qdev); + + /* must initialize irq before first async io - slot creation */ + r = qxl_irq_init(qdev); + if (r) { + DRM_ERROR("Unable to init qxl irq\n"); + goto release_ring_free; + } + + /* + * Note that virtual is surface0. We rely on the single ioremap done + * before. 
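+	 * The main slot spans VRAM up to the ram header, the surfaces slot
+	 * spans the whole surface BAR; both rely on the io_mappings created
+	 * earlier in this function.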
+ */ + setup_slot(qdev, &qdev->main_slot, 0, "main", + (unsigned long)qdev->vram_base, + (unsigned long)qdev->rom->ram_header_offset); + setup_slot(qdev, &qdev->surfaces_slot, 1, "surfaces", + (unsigned long)qdev->surfaceram_base, + (unsigned long)qdev->surfaceram_size); + + INIT_WORK(&qdev->gc_work, qxl_gc_work); + + return 0; + +release_ring_free: + qxl_ring_free(qdev->release_ring); +cursor_ring_free: + qxl_ring_free(qdev->cursor_ring); +command_ring_free: + qxl_ring_free(qdev->command_ring); +ram_header_unmap: + iounmap(qdev->ram_header); +bo_fini: + qxl_bo_fini(qdev); +rom_unmap: + iounmap(qdev->rom); +surface_mapping_free: + io_mapping_free(qdev->surface_mapping); +vram_mapping_free: + io_mapping_free(qdev->vram_mapping); + return r; +} + +void qxl_device_fini(struct qxl_device *qdev) +{ + int cur_idx; + + /* check if qxl_device_init() was successful (gc_work is initialized last) */ + if (!qdev->gc_work.func) + return; + + for (cur_idx = 0; cur_idx < 3; cur_idx++) { + if (!qdev->current_release_bo[cur_idx]) + continue; + qxl_bo_unpin(qdev->current_release_bo[cur_idx]); + qxl_bo_unref(&qdev->current_release_bo[cur_idx]); + qdev->current_release_bo_offset[cur_idx] = 0; + qdev->current_release_bo[cur_idx] = NULL; + } + + /* + * Ask host to release resources (+fill release ring), + * then wait for the release actually happening. + */ + qxl_io_notify_oom(qdev); + wait_event_timeout(qdev->release_event, + atomic_read(&qdev->release_count) == 0, + HZ); + flush_work(&qdev->gc_work); + qxl_surf_evict(qdev); + qxl_vram_evict(qdev); + + qxl_gem_fini(qdev); + qxl_bo_fini(qdev); + qxl_ring_free(qdev->command_ring); + qxl_ring_free(qdev->cursor_ring); + qxl_ring_free(qdev->release_ring); + io_mapping_free(qdev->surface_mapping); + io_mapping_free(qdev->vram_mapping); + iounmap(qdev->ram_header); + iounmap(qdev->rom); + qdev->rom = NULL; +} diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c new file mode 100644 index 000000000..695d9308d --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -0,0 +1,414 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alon Levy + */ + +#include +#include + +#include "qxl_drv.h" +#include "qxl_object.h" + +static int __qxl_bo_pin(struct qxl_bo *bo); +static void __qxl_bo_unpin(struct qxl_bo *bo); + +static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo) +{ + struct qxl_bo *bo; + struct qxl_device *qdev; + + bo = to_qxl_bo(tbo); + qdev = to_qxl(bo->tbo.base.dev); + + qxl_surface_evict(qdev, bo, false); + WARN_ON_ONCE(bo->map_count > 0); + mutex_lock(&qdev->gem.mutex); + list_del_init(&bo->list); + mutex_unlock(&qdev->gem.mutex); + drm_gem_object_release(&bo->tbo.base); + kfree(bo); +} + +bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) +{ + if (bo->destroy == &qxl_ttm_bo_destroy) + return true; + return false; +} + +void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) +{ + u32 c = 0; + u32 pflag = 0; + unsigned int i; + + if (qbo->tbo.base.size <= PAGE_SIZE) + pflag |= TTM_PL_FLAG_TOPDOWN; + + qbo->placement.placement = qbo->placements; + qbo->placement.busy_placement = qbo->placements; + if (domain == QXL_GEM_DOMAIN_VRAM) { + qbo->placements[c].mem_type = TTM_PL_VRAM; + qbo->placements[c++].flags = pflag; + } + if (domain == QXL_GEM_DOMAIN_SURFACE) { + qbo->placements[c].mem_type = TTM_PL_PRIV; + qbo->placements[c++].flags = pflag; + qbo->placements[c].mem_type = TTM_PL_VRAM; + qbo->placements[c++].flags = pflag; + } + if (domain == QXL_GEM_DOMAIN_CPU) { + qbo->placements[c].mem_type = TTM_PL_SYSTEM; + qbo->placements[c++].flags = pflag; + } + if (!c) { + qbo->placements[c].mem_type = TTM_PL_SYSTEM; + qbo->placements[c++].flags = 0; + } + qbo->placement.num_placement = c; + qbo->placement.num_busy_placement = c; + for (i = 0; i < c; ++i) { + qbo->placements[i].fpfn = 0; + qbo->placements[i].lpfn = 0; + } +} + +static const struct drm_gem_object_funcs qxl_object_funcs = { + .free = qxl_gem_object_free, + .open = qxl_gem_object_open, + .close = qxl_gem_object_close, + .pin = qxl_gem_prime_pin, + .unpin = qxl_gem_prime_unpin, + .get_sg_table = qxl_gem_prime_get_sg_table, + .vmap = qxl_gem_prime_vmap, + .vunmap = qxl_gem_prime_vunmap, + .mmap = drm_gem_ttm_mmap, + .print_info = drm_gem_ttm_print_info, +}; + +int qxl_bo_create(struct qxl_device *qdev, unsigned long size, + bool kernel, bool pinned, u32 domain, u32 priority, + struct qxl_surface *surf, + struct qxl_bo **bo_ptr) +{ + struct ttm_operation_ctx ctx = { !kernel, false }; + struct qxl_bo *bo; + enum ttm_bo_type type; + int r; + + if (kernel) + type = ttm_bo_type_kernel; + else + type = ttm_bo_type_device; + *bo_ptr = NULL; + bo = kzalloc(sizeof(struct qxl_bo), GFP_KERNEL); + if (bo == NULL) + return -ENOMEM; + size = roundup(size, PAGE_SIZE); + r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size); + if (unlikely(r)) { + kfree(bo); + return r; + } + bo->tbo.base.funcs = &qxl_object_funcs; + bo->type = domain; + bo->surface_id = 0; + INIT_LIST_HEAD(&bo->list); + + if (surf) + bo->surf = *surf; + + qxl_ttm_placement_from_domain(bo, domain); + + bo->tbo.priority = priority; + r = ttm_bo_init_reserved(&qdev->mman.bdev, &bo->tbo, type, + &bo->placement, 0, &ctx, NULL, NULL, + &qxl_ttm_bo_destroy); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + dev_err(qdev->ddev.dev, + "object_init failed for (%lu, 0x%08X)\n", + size, domain); + return r; + } + if (pinned) + ttm_bo_pin(&bo->tbo); + ttm_bo_unreserve(&bo->tbo); + *bo_ptr = bo; + return 0; +} + +int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map) +{ + int r; + + dma_resv_assert_held(bo->tbo.base.resv); + + if (bo->kptr) { + 
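+		/* already vmapped; just take another mapping reference */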
bo->map_count++; + goto out; + } + r = ttm_bo_vmap(&bo->tbo, &bo->map); + if (r) + return r; + bo->map_count = 1; + + /* TODO: Remove kptr in favor of map everywhere. */ + if (bo->map.is_iomem) + bo->kptr = (void *)bo->map.vaddr_iomem; + else + bo->kptr = bo->map.vaddr; + +out: + *map = bo->map; + return 0; +} + +int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map) +{ + int r; + + r = qxl_bo_reserve(bo); + if (r) + return r; + + r = __qxl_bo_pin(bo); + if (r) { + qxl_bo_unreserve(bo); + return r; + } + + r = qxl_bo_vmap_locked(bo, map); + qxl_bo_unreserve(bo); + return r; +} + +void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, + struct qxl_bo *bo, int page_offset) +{ + unsigned long offset; + void *rptr; + int ret; + struct io_mapping *map; + struct iosys_map bo_map; + + if (bo->tbo.resource->mem_type == TTM_PL_VRAM) + map = qdev->vram_mapping; + else if (bo->tbo.resource->mem_type == TTM_PL_PRIV) + map = qdev->surface_mapping; + else + goto fallback; + + offset = bo->tbo.resource->start << PAGE_SHIFT; + return io_mapping_map_atomic_wc(map, offset + page_offset); +fallback: + if (bo->kptr) { + rptr = bo->kptr + (page_offset * PAGE_SIZE); + return rptr; + } + + ret = qxl_bo_vmap_locked(bo, &bo_map); + if (ret) + return NULL; + rptr = bo_map.vaddr; /* TODO: Use mapping abstraction properly */ + + rptr += page_offset * PAGE_SIZE; + return rptr; +} + +void qxl_bo_vunmap_locked(struct qxl_bo *bo) +{ + dma_resv_assert_held(bo->tbo.base.resv); + + if (bo->kptr == NULL) + return; + bo->map_count--; + if (bo->map_count > 0) + return; + bo->kptr = NULL; + ttm_bo_vunmap(&bo->tbo, &bo->map); +} + +int qxl_bo_vunmap(struct qxl_bo *bo) +{ + int r; + + r = qxl_bo_reserve(bo); + if (r) + return r; + + qxl_bo_vunmap_locked(bo); + __qxl_bo_unpin(bo); + qxl_bo_unreserve(bo); + return 0; +} + +void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, + struct qxl_bo *bo, void *pmap) +{ + if ((bo->tbo.resource->mem_type != TTM_PL_VRAM) && + (bo->tbo.resource->mem_type != TTM_PL_PRIV)) + goto fallback; + + io_mapping_unmap_atomic(pmap); + return; + fallback: + qxl_bo_vunmap_locked(bo); +} + +void qxl_bo_unref(struct qxl_bo **bo) +{ + if ((*bo) == NULL) + return; + + drm_gem_object_put(&(*bo)->tbo.base); + *bo = NULL; +} + +struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) +{ + drm_gem_object_get(&bo->tbo.base); + return bo; +} + +static int __qxl_bo_pin(struct qxl_bo *bo) +{ + struct ttm_operation_ctx ctx = { false, false }; + struct drm_device *ddev = bo->tbo.base.dev; + int r; + + if (bo->tbo.pin_count) { + ttm_bo_pin(&bo->tbo); + return 0; + } + qxl_ttm_placement_from_domain(bo, bo->type); + r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (likely(r == 0)) + ttm_bo_pin(&bo->tbo); + if (unlikely(r != 0)) + dev_err(ddev->dev, "%p pin failed\n", bo); + return r; +} + +static void __qxl_bo_unpin(struct qxl_bo *bo) +{ + ttm_bo_unpin(&bo->tbo); +} + +/* + * Reserve the BO before pinning the object. If the BO was reserved + * beforehand, use the internal version directly __qxl_bo_pin. + * + */ +int qxl_bo_pin(struct qxl_bo *bo) +{ + int r; + + r = qxl_bo_reserve(bo); + if (r) + return r; + + r = __qxl_bo_pin(bo); + qxl_bo_unreserve(bo); + return r; +} + +/* + * Reserve the BO before pinning the object. If the BO was reserved + * beforehand, use the internal version directly __qxl_bo_unpin. 
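+ * Note that unpinning itself cannot fail here; only the reservation can.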
+ * + */ +int qxl_bo_unpin(struct qxl_bo *bo) +{ + int r; + + r = qxl_bo_reserve(bo); + if (r) + return r; + + __qxl_bo_unpin(bo); + qxl_bo_unreserve(bo); + return 0; +} + +void qxl_bo_force_delete(struct qxl_device *qdev) +{ + struct qxl_bo *bo, *n; + + if (list_empty(&qdev->gem.objects)) + return; + dev_err(qdev->ddev.dev, "Userspace still has active objects !\n"); + list_for_each_entry_safe(bo, n, &qdev->gem.objects, list) { + dev_err(qdev->ddev.dev, "%p %p %lu %lu force free\n", + &bo->tbo.base, bo, (unsigned long)bo->tbo.base.size, + *((unsigned long *)&bo->tbo.base.refcount)); + mutex_lock(&qdev->gem.mutex); + list_del_init(&bo->list); + mutex_unlock(&qdev->gem.mutex); + /* this should unref the ttm bo */ + drm_gem_object_put(&bo->tbo.base); + } +} + +int qxl_bo_init(struct qxl_device *qdev) +{ + return qxl_ttm_init(qdev); +} + +void qxl_bo_fini(struct qxl_device *qdev) +{ + qxl_ttm_fini(qdev); +} + +int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) +{ + int ret; + + if (bo->type == QXL_GEM_DOMAIN_SURFACE && bo->surface_id == 0) { + /* allocate a surface id for this surface now */ + ret = qxl_surface_id_alloc(qdev, bo); + if (ret) + return ret; + + ret = qxl_hw_surface_alloc(qdev, bo); + if (ret) + return ret; + } + return 0; +} + +int qxl_surf_evict(struct qxl_device *qdev) +{ + struct ttm_resource_manager *man; + + man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV); + return ttm_resource_manager_evict_all(&qdev->mman.bdev, man); +} + +int qxl_vram_evict(struct qxl_device *qdev) +{ + struct ttm_resource_manager *man; + + man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM); + return ttm_resource_manager_evict_all(&qdev->mman.bdev, man); +} diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h new file mode 100644 index 000000000..53392cb90 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -0,0 +1,75 @@ +/* + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Dave Airlie + * Alon Levy + */ +#ifndef QXL_OBJECT_H +#define QXL_OBJECT_H + +#include "qxl_drv.h" + +static inline int qxl_bo_reserve(struct qxl_bo *bo) +{ + int r; + + r = ttm_bo_reserve(&bo->tbo, true, false, NULL); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) { + struct drm_device *ddev = bo->tbo.base.dev; + + dev_err(ddev->dev, "%p reserve failed\n", bo); + } + return r; + } + return 0; +} + +static inline void qxl_bo_unreserve(struct qxl_bo *bo) +{ + ttm_bo_unreserve(&bo->tbo); +} + +static inline unsigned long qxl_bo_size(struct qxl_bo *bo) +{ + return bo->tbo.base.size; +} + +extern int qxl_bo_create(struct qxl_device *qdev, + unsigned long size, + bool kernel, bool pinned, u32 domain, + u32 priority, + struct qxl_surface *surf, + struct qxl_bo **bo_ptr); +int qxl_bo_vmap(struct qxl_bo *bo, struct iosys_map *map); +int qxl_bo_vmap_locked(struct qxl_bo *bo, struct iosys_map *map); +int qxl_bo_vunmap(struct qxl_bo *bo); +void qxl_bo_vunmap_locked(struct qxl_bo *bo); +void *qxl_bo_kmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, int page_offset); +void qxl_bo_kunmap_atomic_page(struct qxl_device *qdev, struct qxl_bo *bo, void *map); +extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); +extern void qxl_bo_unref(struct qxl_bo **bo); +extern int qxl_bo_pin(struct qxl_bo *bo); +extern int qxl_bo_unpin(struct qxl_bo *bo); +extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); +extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); + +#endif diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c new file mode 100644 index 000000000..142d01415 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_prime.c @@ -0,0 +1,75 @@ +/* + * Copyright 2014 Canonical + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Andreas Pokorny + */ + +#include "qxl_drv.h" +#include "qxl_object.h" + +/* Empty Implementations as there should not be any other driver for a virtual + * device that might share buffers with qxl */ + +int qxl_gem_prime_pin(struct drm_gem_object *obj) +{ + struct qxl_bo *bo = gem_to_qxl_bo(obj); + + return qxl_bo_pin(bo); +} + +void qxl_gem_prime_unpin(struct drm_gem_object *obj) +{ + struct qxl_bo *bo = gem_to_qxl_bo(obj); + + qxl_bo_unpin(bo); +} + +struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + return ERR_PTR(-ENOSYS); +} + +struct drm_gem_object *qxl_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table) +{ + return ERR_PTR(-ENOSYS); +} + +int qxl_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map) +{ + struct qxl_bo *bo = gem_to_qxl_bo(obj); + int ret; + + ret = qxl_bo_vmap(bo, map); + if (ret < 0) + return ret; + + return 0; +} + +void qxl_gem_prime_vunmap(struct drm_gem_object *obj, + struct iosys_map *map) +{ + struct qxl_bo *bo = gem_to_qxl_bo(obj); + + qxl_bo_vunmap(bo); +} diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c new file mode 100644 index 000000000..368d26da0 --- /dev/null +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -0,0 +1,439 @@ +/* + * Copyright 2011 Red Hat, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * on the rights to use, copy, modify, merge, publish, distribute, sub + * license, and/or sell copies of the Software, and to permit persons to whom + * the Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include + +#include + +#include "qxl_drv.h" +#include "qxl_object.h" + +/* + * drawable cmd cache - allocate a bunch of VRAM pages, suballocate + * into 256 byte chunks for now - gives 16 cmds per page. + * + * use an ida to index into the chunks? 
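+ *
+ * (As implemented below, each release type keeps its own current BO and
+ * offset cursor; surface commands are packed 128 bytes apart, drawables
+ * and cursor commands 256 bytes apart.)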
+ */ +/* manage releaseables */ +/* stack them 16 high for now -drawable object is 191 */ +#define RELEASE_SIZE 256 +#define RELEASES_PER_BO (PAGE_SIZE / RELEASE_SIZE) +/* put an alloc/dealloc surface cmd into one bo and round up to 128 */ +#define SURFACE_RELEASE_SIZE 128 +#define SURFACE_RELEASES_PER_BO (PAGE_SIZE / SURFACE_RELEASE_SIZE) + +static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; +static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; + +static const char *qxl_get_driver_name(struct dma_fence *fence) +{ + return "qxl"; +} + +static const char *qxl_get_timeline_name(struct dma_fence *fence) +{ + return "release"; +} + +static long qxl_fence_wait(struct dma_fence *fence, bool intr, + signed long timeout) +{ + struct qxl_device *qdev; + unsigned long cur, end = jiffies + timeout; + + qdev = container_of(fence->lock, struct qxl_device, release_lock); + + if (!wait_event_timeout(qdev->release_event, + (dma_fence_is_signaled(fence) || + (qxl_io_notify_oom(qdev), 0)), + timeout)) + return 0; + + cur = jiffies; + if (time_after(cur, end)) + return 0; + return end - cur; +} + +static const struct dma_fence_ops qxl_fence_ops = { + .get_driver_name = qxl_get_driver_name, + .get_timeline_name = qxl_get_timeline_name, + .wait = qxl_fence_wait, +}; + +static int +qxl_release_alloc(struct qxl_device *qdev, int type, + struct qxl_release **ret) +{ + struct qxl_release *release; + int handle; + size_t size = sizeof(*release); + + release = kmalloc(size, GFP_KERNEL); + if (!release) { + DRM_ERROR("Out of memory\n"); + return -ENOMEM; + } + release->base.ops = NULL; + release->type = type; + release->release_offset = 0; + release->surface_release_id = 0; + INIT_LIST_HEAD(&release->bos); + + idr_preload(GFP_KERNEL); + spin_lock(&qdev->release_idr_lock); + handle = idr_alloc(&qdev->release_idr, release, 1, 0, GFP_NOWAIT); + release->base.seqno = ++qdev->release_seqno; + spin_unlock(&qdev->release_idr_lock); + idr_preload_end(); + if (handle < 0) { + kfree(release); + *ret = NULL; + return handle; + } + *ret = release; + DRM_DEBUG_DRIVER("allocated release %d\n", handle); + release->id = handle; + return handle; +} + +static void +qxl_release_free_list(struct qxl_release *release) +{ + while (!list_empty(&release->bos)) { + struct qxl_bo_list *entry; + struct qxl_bo *bo; + + entry = container_of(release->bos.next, + struct qxl_bo_list, tv.head); + bo = to_qxl_bo(entry->tv.bo); + qxl_bo_unref(&bo); + list_del(&entry->tv.head); + kfree(entry); + } + release->release_bo = NULL; +} + +void +qxl_release_free(struct qxl_device *qdev, + struct qxl_release *release) +{ + DRM_DEBUG_DRIVER("release %d, type %d\n", release->id, release->type); + + if (release->surface_release_id) + qxl_surface_id_dealloc(qdev, release->surface_release_id); + + spin_lock(&qdev->release_idr_lock); + idr_remove(&qdev->release_idr, release->id); + spin_unlock(&qdev->release_idr_lock); + + if (release->base.ops) { + WARN_ON(list_empty(&release->bos)); + qxl_release_free_list(release); + + dma_fence_signal(&release->base); + dma_fence_put(&release->base); + } else { + qxl_release_free_list(release); + kfree(release); + } + atomic_dec(&qdev->release_count); +} + +static int qxl_release_bo_alloc(struct qxl_device *qdev, + struct qxl_bo **bo, + u32 priority) +{ + /* pin releases bo's they are too messy to evict */ + return qxl_bo_create(qdev, PAGE_SIZE, false, true, + QXL_GEM_DOMAIN_VRAM, priority, NULL, bo); +} + +int qxl_release_list_add(struct 
qxl_release *release, struct qxl_bo *bo) +{ + struct qxl_bo_list *entry; + + list_for_each_entry(entry, &release->bos, tv.head) { + if (entry->tv.bo == &bo->tbo) + return 0; + } + + entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + qxl_bo_ref(bo); + entry->tv.bo = &bo->tbo; + entry->tv.num_shared = 0; + list_add_tail(&entry->tv.head, &release->bos); + return 0; +} + +static int qxl_release_validate_bo(struct qxl_bo *bo) +{ + struct ttm_operation_ctx ctx = { true, false }; + int ret; + + if (!bo->tbo.pin_count) { + qxl_ttm_placement_from_domain(bo, bo->type); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + if (ret) + return ret; + } + + ret = dma_resv_reserve_fences(bo->tbo.base.resv, 1); + if (ret) + return ret; + + /* allocate a surface for reserved + validated buffers */ + ret = qxl_bo_check_id(to_qxl(bo->tbo.base.dev), bo); + if (ret) + return ret; + return 0; +} + +int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) +{ + int ret; + struct qxl_bo_list *entry; + + /* if only one object on the release its the release itself + since these objects are pinned no need to reserve */ + if (list_is_singular(&release->bos)) + return 0; + + ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, + !no_intr, NULL); + if (ret) + return ret; + + list_for_each_entry(entry, &release->bos, tv.head) { + struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); + + ret = qxl_release_validate_bo(bo); + if (ret) { + ttm_eu_backoff_reservation(&release->ticket, &release->bos); + return ret; + } + } + return 0; +} + +void qxl_release_backoff_reserve_list(struct qxl_release *release) +{ + /* if only one object on the release its the release itself + since these objects are pinned no need to reserve */ + if (list_is_singular(&release->bos)) + return; + + ttm_eu_backoff_reservation(&release->ticket, &release->bos); +} + +int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, + enum qxl_surface_cmd_type surface_cmd_type, + struct qxl_release *create_rel, + struct qxl_release **release) +{ + if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { + int idr_ret; + struct qxl_bo *bo; + union qxl_release_info *info; + + /* stash the release after the create command */ + idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); + if (idr_ret < 0) + return idr_ret; + bo = create_rel->release_bo; + + (*release)->release_bo = bo; + (*release)->release_offset = create_rel->release_offset + 64; + + qxl_release_list_add(*release, bo); + + info = qxl_release_map(qdev, *release); + info->id = idr_ret; + qxl_release_unmap(qdev, *release, info); + return 0; + } + + return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), + QXL_RELEASE_SURFACE_CMD, release, NULL); +} + +int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, + int type, struct qxl_release **release, + struct qxl_bo **rbo) +{ + struct qxl_bo *bo, *free_bo = NULL; + int idr_ret; + int ret = 0; + union qxl_release_info *info; + int cur_idx; + u32 priority; + + if (type == QXL_RELEASE_DRAWABLE) { + cur_idx = 0; + priority = 0; + } else if (type == QXL_RELEASE_SURFACE_CMD) { + cur_idx = 1; + priority = 1; + } else if (type == QXL_RELEASE_CURSOR_CMD) { + cur_idx = 2; + priority = 1; + } + else { + DRM_ERROR("got illegal type: %d\n", type); + return -EINVAL; + } + + idr_ret = qxl_release_alloc(qdev, type, release); + if (idr_ret < 0) { + if (rbo) + *rbo = NULL; + return idr_ret; + } + atomic_inc(&qdev->release_count); + + 
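+	/* Suballocate from the current release BO for this type; when it is
+	 * full, drop it and start a fresh one under release_mutex. */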
mutex_lock(&qdev->release_mutex); + if (qdev->current_release_bo_offset[cur_idx] + 1 >= releases_per_bo[cur_idx]) { + free_bo = qdev->current_release_bo[cur_idx]; + qdev->current_release_bo_offset[cur_idx] = 0; + qdev->current_release_bo[cur_idx] = NULL; + } + if (!qdev->current_release_bo[cur_idx]) { + ret = qxl_release_bo_alloc(qdev, &qdev->current_release_bo[cur_idx], priority); + if (ret) { + mutex_unlock(&qdev->release_mutex); + if (free_bo) { + qxl_bo_unpin(free_bo); + qxl_bo_unref(&free_bo); + } + qxl_release_free(qdev, *release); + return ret; + } + } + + bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); + + (*release)->release_bo = bo; + (*release)->release_offset = qdev->current_release_bo_offset[cur_idx] * release_size_per_bo[cur_idx]; + qdev->current_release_bo_offset[cur_idx]++; + + if (rbo) + *rbo = bo; + + mutex_unlock(&qdev->release_mutex); + if (free_bo) { + qxl_bo_unpin(free_bo); + qxl_bo_unref(&free_bo); + } + + ret = qxl_release_list_add(*release, bo); + qxl_bo_unref(&bo); + if (ret) { + qxl_release_free(qdev, *release); + return ret; + } + + info = qxl_release_map(qdev, *release); + info->id = idr_ret; + qxl_release_unmap(qdev, *release, info); + + return ret; +} + +struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, + uint64_t id) +{ + struct qxl_release *release; + + spin_lock(&qdev->release_idr_lock); + release = idr_find(&qdev->release_idr, id); + spin_unlock(&qdev->release_idr_lock); + if (!release) { + DRM_ERROR("failed to find id in release_idr\n"); + return NULL; + } + + return release; +} + +union qxl_release_info *qxl_release_map(struct qxl_device *qdev, + struct qxl_release *release) +{ + void *ptr; + union qxl_release_info *info; + struct qxl_bo *bo = release->release_bo; + + ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_MASK); + if (!ptr) + return NULL; + info = ptr + (release->release_offset & ~PAGE_MASK); + return info; +} + +void qxl_release_unmap(struct qxl_device *qdev, + struct qxl_release *release, + union qxl_release_info *info) +{ + struct qxl_bo *bo = release->release_bo; + void *ptr; + + ptr = ((void *)info) - (release->release_offset & ~PAGE_MASK); + qxl_bo_kunmap_atomic_page(qdev, bo, ptr); +} + +void qxl_release_fence_buffer_objects(struct qxl_release *release) +{ + struct ttm_buffer_object *bo; + struct ttm_device *bdev; + struct ttm_validate_buffer *entry; + struct qxl_device *qdev; + + /* if only one object on the release its the release itself + since these objects are pinned no need to reserve */ + if (list_is_singular(&release->bos) || list_empty(&release->bos)) + return; + + bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; + bdev = bo->bdev; + qdev = container_of(bdev, struct qxl_device, mman.bdev); + + /* + * Since we never really allocated a context and we don't want to conflict, + * set the highest bits. This will break if we really allow exporting of dma-bufs. 
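+	 * (Hence the context passed to dma_fence_init() below is
+	 * release->id | 0xf0000000 rather than one obtained from
+	 * dma_fence_context_alloc().)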
+	 */
+	dma_fence_init(&release->base, &qxl_fence_ops, &qdev->release_lock,
+		       release->id | 0xf0000000, release->base.seqno);
+	trace_dma_fence_emit(&release->base);
+
+	list_for_each_entry(entry, &release->bos, head) {
+		bo = entry->bo;
+
+		dma_resv_add_fence(bo->base.resv, &release->base,
+				   DMA_RESV_USAGE_READ);
+		ttm_bo_move_to_lru_tail_unlocked(bo);
+		dma_resv_unlock(bo->base.resv);
+	}
+	ww_acquire_fini(&release->ticket);
+}
+
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
new file mode 100644
index 000000000..ee95001e6
--- /dev/null
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -0,0 +1,235 @@
+/*
+ * Copyright 2013 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alon Levy
+ */
+
+#include <linux/delay.h>
+
+#include <drm/drm.h>
+#include <drm/drm_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/qxl_drm.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
+
+#include "qxl_drv.h"
+#include "qxl_object.h"
+
+static struct qxl_device *qxl_get_qdev(struct ttm_device *bdev)
+{
+	struct qxl_mman *mman;
+	struct qxl_device *qdev;
+
+	mman = container_of(bdev, struct qxl_mman, bdev);
+	qdev = container_of(mman, struct qxl_device, mman);
+	return qdev;
+}
+
+static void qxl_evict_flags(struct ttm_buffer_object *bo,
+			    struct ttm_placement *placement)
+{
+	struct qxl_bo *qbo;
+	static const struct ttm_place placements = {
+		.fpfn = 0,
+		.lpfn = 0,
+		.mem_type = TTM_PL_SYSTEM,
+		.flags = 0
+	};
+
+	if (!qxl_ttm_bo_is_qxl_bo(bo)) {
+		placement->placement = &placements;
+		placement->busy_placement = &placements;
+		placement->num_placement = 1;
+		placement->num_busy_placement = 1;
+		return;
+	}
+	qbo = to_qxl_bo(bo);
+	qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU);
+	*placement = qbo->placement;
+}
+
+int qxl_ttm_io_mem_reserve(struct ttm_device *bdev,
+			   struct ttm_resource *mem)
+{
+	struct qxl_device *qdev = qxl_get_qdev(bdev);
+
+	switch (mem->mem_type) {
+	case TTM_PL_SYSTEM:
+		/* system memory */
+		return 0;
+	case TTM_PL_VRAM:
+		mem->bus.is_iomem = true;
+		mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base;
+		mem->bus.caching = ttm_write_combined;
+		break;
+	case TTM_PL_PRIV:
+		mem->bus.is_iomem = true;
+		mem->bus.offset = (mem->start << PAGE_SHIFT) +
+			qdev->surfaceram_base;
+		mem->bus.caching = ttm_write_combined;
+		break;
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ * TTM backend functions.
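+ * qxl has no GPU page-table binding step: the ttm_tt here only tracks
+ * system pages, so create and destroy reduce to ttm_tt_init()/ttm_tt_fini().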
+ */
+static void qxl_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm)
+{
+	ttm_tt_fini(ttm);
+	kfree(ttm);
+}
+
+static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo,
+					uint32_t page_flags)
+{
+	struct ttm_tt *ttm;
+
+	ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL);
+	if (ttm == NULL)
+		return NULL;
+	if (ttm_tt_init(ttm, bo, page_flags, ttm_cached, 0)) {
+		kfree(ttm);
+		return NULL;
+	}
+	return ttm;
+}
+
+static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
+			       struct ttm_resource *new_mem)
+{
+	struct qxl_bo *qbo;
+	struct qxl_device *qdev;
+
+	if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource)
+		return;
+	qbo = to_qxl_bo(bo);
+	qdev = to_qxl(qbo->tbo.base.dev);
+
+	if (bo->resource->mem_type == TTM_PL_PRIV && qbo->surface_id)
+		qxl_surface_evict(qdev, qbo, new_mem ? true : false);
+}
+
+static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
+		       struct ttm_operation_ctx *ctx,
+		       struct ttm_resource *new_mem,
+		       struct ttm_place *hop)
+{
+	struct ttm_resource *old_mem = bo->resource;
+	int ret;
+
+	qxl_bo_move_notify(bo, new_mem);
+
+	ret = ttm_bo_wait_ctx(bo, ctx);
+	if (ret)
+		return ret;
+
+	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
+		ttm_bo_move_null(bo, new_mem);
+		return 0;
+	}
+	return ttm_bo_move_memcpy(bo, ctx, new_mem);
+}
+
+static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo)
+{
+	qxl_bo_move_notify(bo, NULL);
+}
+
+static struct ttm_device_funcs qxl_bo_driver = {
+	.ttm_tt_create = &qxl_ttm_tt_create,
+	.ttm_tt_destroy = &qxl_ttm_backend_destroy,
+	.eviction_valuable = ttm_bo_eviction_valuable,
+	.evict_flags = &qxl_evict_flags,
+	.move = &qxl_bo_move,
+	.io_mem_reserve = &qxl_ttm_io_mem_reserve,
+	.delete_mem_notify = &qxl_bo_delete_mem_notify,
+};
+
+static int qxl_ttm_init_mem_type(struct qxl_device *qdev,
+				 unsigned int type,
+				 uint64_t size)
+{
+	return ttm_range_man_init(&qdev->mman.bdev, type, false, size);
+}
+
+int qxl_ttm_init(struct qxl_device *qdev)
+{
+	int r;
+	int num_io_pages; /* != rom->num_io_pages, we include surface0 */
+
+	/* No other user of the address space, so set it to 0 */
+	r = ttm_device_init(&qdev->mman.bdev, &qxl_bo_driver, NULL,
+			    qdev->ddev.anon_inode->i_mapping,
+			    qdev->ddev.vma_offset_manager,
+			    false, false);
+	if (r) {
+		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
+		return r;
+	}
+	/* NOTE: this includes the framebuffer (aka surface 0) */
+	num_io_pages = qdev->rom->ram_header_offset / PAGE_SIZE;
+	r = qxl_ttm_init_mem_type(qdev, TTM_PL_VRAM, num_io_pages);
+	if (r) {
+		DRM_ERROR("Failed initializing VRAM heap.\n");
+		return r;
+	}
+	r = qxl_ttm_init_mem_type(qdev, TTM_PL_PRIV,
+				  qdev->surfaceram_size / PAGE_SIZE);
+	if (r) {
+		DRM_ERROR("Failed initializing Surfaces heap.\n");
+		return r;
+	}
+	DRM_INFO("qxl: %uM of VRAM memory size\n",
+		 (unsigned int)qdev->vram_size / (1024 * 1024));
+	DRM_INFO("qxl: %luM of IO pages memory ready (VRAM domain)\n",
+		 ((unsigned int)num_io_pages * PAGE_SIZE) / (1024 * 1024));
+	DRM_INFO("qxl: %uM of Surface memory size\n",
+		 (unsigned int)qdev->surfaceram_size / (1024 * 1024));
+	return 0;
+}
+
+void qxl_ttm_fini(struct qxl_device *qdev)
+{
+	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_VRAM);
+	ttm_range_man_fini(&qdev->mman.bdev, TTM_PL_PRIV);
+	ttm_device_fini(&qdev->mman.bdev);
+	DRM_INFO("qxl: ttm finalized\n");
+}
+
+void qxl_ttm_debugfs_init(struct qxl_device *qdev)
+{
+#if defined(CONFIG_DEBUG_FS)
+	ttm_resource_manager_create_debugfs(ttm_manager_type(&qdev->mman.bdev,
+							     TTM_PL_VRAM),
+					    qdev->ddev.primary->debugfs_root, "qxl_mem_mm");
+	ttm_resource_manager_create_debugfs(ttm_manager_type(&qdev->mman.bdev,
+							     TTM_PL_PRIV),
+					    qdev->ddev.primary->debugfs_root, "qxl_surf_mm");
+#endif
+}
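
To make the offset arithmetic above concrete, the following standalone sketch
(not part of the patch; all values are made up for illustration) mirrors what
qxl_release_map() and qxl_ttm_io_mem_reserve() compute: a release slot offset
is split into a page-aligned base for the atomic kmap plus a sub-page offset,
and a TTM page index becomes a bus address by shifting and adding the BAR base.
The usual 4 KiB PAGE_SIZE/PAGE_MASK definitions are assumed.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	uint64_t release_offset = 0x1040;	/* hypothetical slot offset */
	uint64_t vram_base = 0xf4000000;	/* hypothetical BAR base */
	uint64_t start = 3;			/* hypothetical mem->start, in pages */

	/* qxl_release_map(): which page to kmap, then the offset within it */
	printf("kmap page at %#llx, info at +%#llx\n",
	       (unsigned long long)(release_offset & PAGE_MASK),
	       (unsigned long long)(release_offset & ~PAGE_MASK));

	/* qxl_ttm_io_mem_reserve(): bus.offset for a TTM_PL_VRAM resource */
	printf("bus.offset = %#llx\n",
	       (unsigned long long)((start << PAGE_SHIFT) + vram_base));
	return 0;
}

For the values above this prints a kmap base of 0x1000 with the info struct at
+0x40 into that page, and a bus offset of 0xf4003000.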