author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/virtio
parent     Initial commit.
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76. (upstream/6.1.76)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/virtio')
-rw-r--r--  drivers/gpu/drm/virtio/Kconfig                 |   13
-rw-r--r--  drivers/gpu/drm/virtio/Makefile                |   11
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_debugfs.c       |  111
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c       |  367
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c           |  204
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h           |  487
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fence.c         |  175
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c           |  296
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ioctl.c         |  898
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_kms.c           |  346
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c        |  249
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c         |  394
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_prime.c         |  170
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_trace.h         |   52
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_trace_points.c  |    5
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c            | 1298
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vram.c          |  228
17 files changed, 5304 insertions(+), 0 deletions(-)
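
The diffstat shows that the userspace-facing entry points land in virtgpu_ioctl.c; its GETPARAM handler, shown further down in this patch, copies an int back through the user pointer passed in the ioctl's value field. As a rough illustration only (it is not part of the patch), the sketch below queries VIRTGPU_PARAM_3D_FEATURES from a client. The device node path and the exact header location are assumptions that vary between systems; real clients typically go through libdrm's drmIoctl() wrapper, which retries on EINTR.

/* Hedged sketch: query a virtio-gpu parameter from userspace.
 * Assumes the DRM UAPI header is reachable as <drm/virtgpu_drm.h>
 * and that /dev/dri/card0 is the virtio-gpu node (both assumptions).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/virtgpu_drm.h>

int main(void)
{
	int fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	int has_3d = 0;
	struct drm_virtgpu_getparam gp;
	memset(&gp, 0, sizeof(gp));
	gp.param = VIRTGPU_PARAM_3D_FEATURES;
	/* The handler in virtgpu_ioctl.c copies sizeof(int) to this pointer. */
	gp.value = (uint64_t)(uintptr_t)&has_3d;

	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) == 0)
		printf("virgl 3d: %s\n", has_3d ? "yes" : "no");
	else
		perror("DRM_IOCTL_VIRTGPU_GETPARAM");

	close(fd);
	return 0;
}
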
diff --git a/drivers/gpu/drm/virtio/Kconfig b/drivers/gpu/drm/virtio/Kconfig new file mode 100644 index 000000000..51ec7c324 --- /dev/null +++ b/drivers/gpu/drm/virtio/Kconfig @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0-only +config DRM_VIRTIO_GPU + tristate "Virtio GPU driver" + depends on DRM && VIRTIO_MENU && MMU + select VIRTIO + select DRM_KMS_HELPER + select DRM_GEM_SHMEM_HELPER + select VIRTIO_DMA_SHARED_BUFFER + help + This is the virtual GPU driver for virtio. It can be used with + QEMU based VMMs (like KVM or Xen). + + If unsure say M. diff --git a/drivers/gpu/drm/virtio/Makefile b/drivers/gpu/drm/virtio/Makefile new file mode 100644 index 000000000..b99fa4a73 --- /dev/null +++ b/drivers/gpu/drm/virtio/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the drm device driver. This driver provides support for the +# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher. + +virtio-gpu-y := virtgpu_drv.o virtgpu_kms.o virtgpu_gem.o virtgpu_vram.o \ + virtgpu_display.o virtgpu_vq.o \ + virtgpu_fence.o virtgpu_object.o virtgpu_debugfs.o virtgpu_plane.o \ + virtgpu_ioctl.o virtgpu_prime.o virtgpu_trace_points.o + +obj-$(CONFIG_DRM_VIRTIO_GPU) += virtio-gpu.o diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c new file mode 100644 index 000000000..853dd9aa3 --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c @@ -0,0 +1,111 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include <linux/string_helpers.h> + +#include <drm/drm_debugfs.h> +#include <drm/drm_file.h> + +#include "virtgpu_drv.h" + +static void virtio_gpu_add_bool(struct seq_file *m, const char *name, + bool value) +{ + seq_printf(m, "%-16s : %s\n", name, str_yes_no(value)); +} + +static void virtio_gpu_add_int(struct seq_file *m, const char *name, int value) +{ + seq_printf(m, "%-16s : %d\n", name, value); +} + +static int virtio_gpu_features(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; + + virtio_gpu_add_bool(m, "virgl", vgdev->has_virgl_3d); + virtio_gpu_add_bool(m, "edid", vgdev->has_edid); + virtio_gpu_add_bool(m, "indirect", vgdev->has_indirect); + + virtio_gpu_add_bool(m, "resource uuid", + vgdev->has_resource_assign_uuid); + + virtio_gpu_add_bool(m, "blob resources", vgdev->has_resource_blob); + virtio_gpu_add_bool(m, "context init", vgdev->has_context_init); + virtio_gpu_add_int(m, "cap sets", vgdev->num_capsets); + virtio_gpu_add_int(m, "scanouts", vgdev->num_scanouts); + if (vgdev->host_visible_region.len) { + seq_printf(m, "%-16s : 0x%lx +0x%lx\n", "host visible region", + (unsigned long)vgdev->host_visible_region.addr, + (unsigned long)vgdev->host_visible_region.len); + } + return 0; +} + +static int +virtio_gpu_debugfs_irq_info(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *) m->private; + struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; + + seq_printf(m, "fence %llu %lld\n", + (u64)atomic64_read(&vgdev->fence_drv.last_fence_id), + vgdev->fence_drv.current_fence_id); + return 0; +} + +static int +virtio_gpu_debugfs_host_visible_mm(struct seq_file *m, void *data) +{ + struct drm_info_node *node = (struct drm_info_node *)m->private; + struct virtio_gpu_device *vgdev = node->minor->dev->dev_private; + struct drm_printer p; + + if (!vgdev->has_host_visible) { + seq_puts(m, "Host allocations not visible to guest\n"); + return 0; + } + + p = drm_seq_file_printer(m); + drm_mm_print(&vgdev->host_visible_mm, &p); + return 0; +} + +static struct drm_info_list virtio_gpu_debugfs_list[] = { + { "virtio-gpu-features", virtio_gpu_features }, + { "virtio-gpu-irq-fence", virtio_gpu_debugfs_irq_info, 0, NULL }, + { "virtio-gpu-host-visible-mm", virtio_gpu_debugfs_host_visible_mm }, +}; + +#define VIRTIO_GPU_DEBUGFS_ENTRIES ARRAY_SIZE(virtio_gpu_debugfs_list) + +void +virtio_gpu_debugfs_init(struct drm_minor *minor) +{ + drm_debugfs_create_files(virtio_gpu_debugfs_list, + VIRTIO_GPU_DEBUGFS_ENTRIES, + minor->debugfs_root, minor); +} diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c new file mode 100644 index 000000000..9ea7611a9 --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_display.c @@ -0,0 +1,367 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. 
+ * + * Authors: + * Dave Airlie + * Alon Levy + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include "virtgpu_drv.h" + +#define XRES_MIN 32 +#define YRES_MIN 32 + +#define XRES_DEF 1024 +#define YRES_DEF 768 + +#define XRES_MAX 8192 +#define YRES_MAX 8192 + +#define drm_connector_to_virtio_gpu_output(x) \ + container_of(x, struct virtio_gpu_output, conn) + +static const struct drm_crtc_funcs virtio_gpu_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .destroy = drm_crtc_cleanup, + + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_framebuffer_funcs virtio_gpu_fb_funcs = { + .create_handle = drm_gem_fb_create_handle, + .destroy = drm_gem_fb_destroy, + .dirty = drm_atomic_helper_dirtyfb, +}; + +static int +virtio_gpu_framebuffer_init(struct drm_device *dev, + struct virtio_gpu_framebuffer *vgfb, + const struct drm_mode_fb_cmd2 *mode_cmd, + struct drm_gem_object *obj) +{ + int ret; + + vgfb->base.obj[0] = obj; + + drm_helper_mode_fill_fb_struct(dev, &vgfb->base, mode_cmd); + + ret = drm_framebuffer_init(dev, &vgfb->base, &virtio_gpu_fb_funcs); + if (ret) { + vgfb->base.obj[0] = NULL; + return ret; + } + return 0; +} + +static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); + + virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, + crtc->mode.hdisplay, + crtc->mode.vdisplay, 0, 0); + virtio_gpu_notify(vgdev); +} + +static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ +} + +static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_device *dev = crtc->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); + + virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0); + virtio_gpu_notify(vgdev); +} + +static int 
virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + return 0; +} + +static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc); + + /* + * virtio-gpu can't do modeset and plane update operations + * independent from each other. So the actual modeset happens + * in the plane update callback, and here we just check + * whenever we must force the modeset. + */ + if (drm_atomic_crtc_needs_modeset(crtc_state)) { + output->needs_modeset = true; + } +} + +static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = { + .mode_set_nofb = virtio_gpu_crtc_mode_set_nofb, + .atomic_check = virtio_gpu_crtc_atomic_check, + .atomic_flush = virtio_gpu_crtc_atomic_flush, + .atomic_enable = virtio_gpu_crtc_atomic_enable, + .atomic_disable = virtio_gpu_crtc_atomic_disable, +}; + +static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static void virtio_gpu_enc_enable(struct drm_encoder *encoder) +{ +} + +static void virtio_gpu_enc_disable(struct drm_encoder *encoder) +{ +} + +static int virtio_gpu_conn_get_modes(struct drm_connector *connector) +{ + struct virtio_gpu_output *output = + drm_connector_to_virtio_gpu_output(connector); + struct drm_display_mode *mode = NULL; + int count, width, height; + + if (output->edid) { + count = drm_add_edid_modes(connector, output->edid); + if (count) + return count; + } + + width = le32_to_cpu(output->info.r.width); + height = le32_to_cpu(output->info.r.height); + count = drm_add_modes_noedid(connector, XRES_MAX, YRES_MAX); + + if (width == 0 || height == 0) { + drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF); + } else { + DRM_DEBUG("add mode: %dx%d\n", width, height); + mode = drm_cvt_mode(connector->dev, width, height, 60, + false, false, false); + if (!mode) + return count; + mode->type |= DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + count++; + } + + return count; +} + +static enum drm_mode_status virtio_gpu_conn_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + struct virtio_gpu_output *output = + drm_connector_to_virtio_gpu_output(connector); + int width, height; + + width = le32_to_cpu(output->info.r.width); + height = le32_to_cpu(output->info.r.height); + + if (!(mode->type & DRM_MODE_TYPE_PREFERRED)) + return MODE_OK; + if (mode->hdisplay == XRES_DEF && mode->vdisplay == YRES_DEF) + return MODE_OK; + if (mode->hdisplay <= width && mode->hdisplay >= width - 16 && + mode->vdisplay <= height && mode->vdisplay >= height - 16) + return MODE_OK; + + DRM_DEBUG("del mode: %dx%d\n", mode->hdisplay, mode->vdisplay); + return MODE_BAD; +} + +static const struct drm_encoder_helper_funcs virtio_gpu_enc_helper_funcs = { + .mode_set = virtio_gpu_enc_mode_set, + .enable = virtio_gpu_enc_enable, + .disable = virtio_gpu_enc_disable, +}; + +static const struct drm_connector_helper_funcs virtio_gpu_conn_helper_funcs = { + .get_modes = virtio_gpu_conn_get_modes, + .mode_valid = virtio_gpu_conn_mode_valid, +}; + +static enum drm_connector_status virtio_gpu_conn_detect( + struct drm_connector *connector, + bool force) +{ + struct virtio_gpu_output *output = + drm_connector_to_virtio_gpu_output(connector); + + if (output->info.enabled) + return connector_status_connected; + else + return 
connector_status_disconnected; +} + +static void virtio_gpu_conn_destroy(struct drm_connector *connector) +{ + drm_connector_unregister(connector); + drm_connector_cleanup(connector); +} + +static const struct drm_connector_funcs virtio_gpu_connector_funcs = { + .detect = virtio_gpu_conn_detect, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = virtio_gpu_conn_destroy, + .reset = drm_atomic_helper_connector_reset, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static int vgdev_output_init(struct virtio_gpu_device *vgdev, int index) +{ + struct drm_device *dev = vgdev->ddev; + struct virtio_gpu_output *output = vgdev->outputs + index; + struct drm_connector *connector = &output->conn; + struct drm_encoder *encoder = &output->enc; + struct drm_crtc *crtc = &output->crtc; + struct drm_plane *primary, *cursor; + + output->index = index; + if (index == 0) { + output->info.enabled = cpu_to_le32(true); + output->info.r.width = cpu_to_le32(XRES_DEF); + output->info.r.height = cpu_to_le32(YRES_DEF); + } + + primary = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_PRIMARY, index); + if (IS_ERR(primary)) + return PTR_ERR(primary); + cursor = virtio_gpu_plane_init(vgdev, DRM_PLANE_TYPE_CURSOR, index); + if (IS_ERR(cursor)) + return PTR_ERR(cursor); + drm_crtc_init_with_planes(dev, crtc, primary, cursor, + &virtio_gpu_crtc_funcs, NULL); + drm_crtc_helper_add(crtc, &virtio_gpu_crtc_helper_funcs); + + drm_connector_init(dev, connector, &virtio_gpu_connector_funcs, + DRM_MODE_CONNECTOR_VIRTUAL); + drm_connector_helper_add(connector, &virtio_gpu_conn_helper_funcs); + if (vgdev->has_edid) + drm_connector_attach_edid_property(connector); + + drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL); + drm_encoder_helper_add(encoder, &virtio_gpu_enc_helper_funcs); + encoder->possible_crtcs = 1 << index; + + drm_connector_attach_encoder(connector, encoder); + drm_connector_register(connector); + return 0; +} + +static struct drm_framebuffer * +virtio_gpu_user_framebuffer_create(struct drm_device *dev, + struct drm_file *file_priv, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_gem_object *obj = NULL; + struct virtio_gpu_framebuffer *virtio_gpu_fb; + int ret; + + if (mode_cmd->pixel_format != DRM_FORMAT_HOST_XRGB8888 && + mode_cmd->pixel_format != DRM_FORMAT_HOST_ARGB8888) + return ERR_PTR(-ENOENT); + + /* lookup object associated with res handle */ + obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); + if (!obj) + return ERR_PTR(-EINVAL); + + virtio_gpu_fb = kzalloc(sizeof(*virtio_gpu_fb), GFP_KERNEL); + if (virtio_gpu_fb == NULL) { + drm_gem_object_put(obj); + return ERR_PTR(-ENOMEM); + } + + ret = virtio_gpu_framebuffer_init(dev, virtio_gpu_fb, mode_cmd, obj); + if (ret) { + kfree(virtio_gpu_fb); + drm_gem_object_put(obj); + return NULL; + } + + return &virtio_gpu_fb->base; +} + +static const struct drm_mode_config_funcs virtio_gpu_mode_funcs = { + .fb_create = virtio_gpu_user_framebuffer_create, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev) +{ + int i, ret; + + ret = drmm_mode_config_init(vgdev->ddev); + if (ret) + return ret; + + vgdev->ddev->mode_config.quirk_addfb_prefer_host_byte_order = true; + vgdev->ddev->mode_config.funcs = &virtio_gpu_mode_funcs; + + /* modes will be validated against the framebuffer size */ + vgdev->ddev->mode_config.min_width 
= XRES_MIN; + vgdev->ddev->mode_config.min_height = YRES_MIN; + vgdev->ddev->mode_config.max_width = XRES_MAX; + vgdev->ddev->mode_config.max_height = YRES_MAX; + + vgdev->ddev->mode_config.fb_modifiers_not_supported = true; + + for (i = 0 ; i < vgdev->num_scanouts; ++i) + vgdev_output_init(vgdev, i); + + drm_mode_config_reset(vgdev->ddev); + return 0; +} + +void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev) +{ + int i; + + for (i = 0 ; i < vgdev->num_scanouts; ++i) + kfree(vgdev->outputs[i].edid); +} diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c new file mode 100644 index 000000000..0035affc3 --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_drv.c @@ -0,0 +1,204 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Gerd Hoffmann <kraxel@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/wait.h> + +#include <drm/drm.h> +#include <drm/drm_aperture.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_file.h> + +#include "virtgpu_drv.h" + +static const struct drm_driver driver; + +static int virtio_gpu_modeset = -1; + +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); +module_param_named(modeset, virtio_gpu_modeset, int, 0400); + +static int virtio_gpu_pci_quirk(struct drm_device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev->dev); + const char *pname = dev_name(&pdev->dev); + bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA; + int ret; + + DRM_INFO("pci: %s detected at %s\n", + vga ? "virtio-vga" : "virtio-gpu-pci", + pname); + if (vga) { + ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, &driver); + if (ret) + return ret; + } + + return 0; +} + +static int virtio_gpu_probe(struct virtio_device *vdev) +{ + struct drm_device *dev; + int ret; + + if (drm_firmware_drivers_only() && virtio_gpu_modeset == -1) + return -EINVAL; + + if (virtio_gpu_modeset == 0) + return -EINVAL; + + /* + * The virtio-gpu device is a virtual device that doesn't have DMA + * ops assigned to it, nor DMA mask set and etc. Its parent device + * is actual GPU device we want to use it for the DRM's device in + * order to benefit from using generic DRM APIs. 
+ */ + dev = drm_dev_alloc(&driver, vdev->dev.parent); + if (IS_ERR(dev)) + return PTR_ERR(dev); + vdev->priv = dev; + + if (dev_is_pci(vdev->dev.parent)) { + ret = virtio_gpu_pci_quirk(dev); + if (ret) + goto err_free; + } + + ret = virtio_gpu_init(vdev, dev); + if (ret) + goto err_free; + + ret = drm_dev_register(dev, 0); + if (ret) + goto err_deinit; + + drm_fbdev_generic_setup(vdev->priv, 32); + return 0; + +err_deinit: + virtio_gpu_deinit(dev); +err_free: + drm_dev_put(dev); + return ret; +} + +static void virtio_gpu_remove(struct virtio_device *vdev) +{ + struct drm_device *dev = vdev->priv; + + drm_dev_unplug(dev); + drm_atomic_helper_shutdown(dev); + virtio_gpu_deinit(dev); + drm_dev_put(dev); +} + +static void virtio_gpu_config_changed(struct virtio_device *vdev) +{ + struct drm_device *dev = vdev->priv; + struct virtio_gpu_device *vgdev = dev->dev_private; + + schedule_work(&vgdev->config_changed_work); +} + +static struct virtio_device_id id_table[] = { + { VIRTIO_ID_GPU, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static unsigned int features[] = { +#ifdef __LITTLE_ENDIAN + /* + * Gallium command stream send by virgl is native endian. + * Because of that we only support little endian guests on + * little endian hosts. + */ + VIRTIO_GPU_F_VIRGL, +#endif + VIRTIO_GPU_F_EDID, + VIRTIO_GPU_F_RESOURCE_UUID, + VIRTIO_GPU_F_RESOURCE_BLOB, + VIRTIO_GPU_F_CONTEXT_INIT, +}; +static struct virtio_driver virtio_gpu_driver = { + .feature_table = features, + .feature_table_size = ARRAY_SIZE(features), + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = virtio_gpu_probe, + .remove = virtio_gpu_remove, + .config_changed = virtio_gpu_config_changed +}; + +module_virtio_driver(virtio_gpu_driver); + +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio GPU driver"); +MODULE_LICENSE("GPL and additional rights"); +MODULE_AUTHOR("Dave Airlie <airlied@redhat.com>"); +MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>"); +MODULE_AUTHOR("Alon Levy"); + +DEFINE_DRM_GEM_FOPS(virtio_gpu_driver_fops); + +static const struct drm_driver driver = { + .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_RENDER | DRIVER_ATOMIC, + .open = virtio_gpu_driver_open, + .postclose = virtio_gpu_driver_postclose, + + .dumb_create = virtio_gpu_mode_dumb_create, + .dumb_map_offset = virtio_gpu_mode_dumb_mmap, + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = virtio_gpu_debugfs_init, +#endif + .prime_handle_to_fd = drm_gem_prime_handle_to_fd, + .prime_fd_to_handle = drm_gem_prime_fd_to_handle, + .gem_prime_mmap = drm_gem_prime_mmap, + .gem_prime_import = virtgpu_gem_prime_import, + .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table, + + .gem_create_object = virtio_gpu_create_object, + .fops = &virtio_gpu_driver_fops, + + .ioctls = virtio_gpu_ioctls, + .num_ioctls = DRM_VIRTIO_NUM_IOCTLS, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, + + .release = virtio_gpu_release, +}; diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h new file mode 100644 index 000000000..9b9847059 --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_drv.h @@ -0,0 +1,487 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef VIRTIO_DRV_H +#define VIRTIO_DRV_H + +#include <linux/dma-direction.h> +#include <linux/virtio.h> +#include <linux/virtio_ids.h> +#include <linux/virtio_config.h> +#include <linux/virtio_gpu.h> + +#include <drm/drm_atomic.h> +#include <drm/drm_drv.h> +#include <drm/drm_encoder.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_ioctl.h> +#include <drm/drm_probe_helper.h> +#include <drm/virtgpu_drm.h> + +#define DRIVER_NAME "virtio_gpu" +#define DRIVER_DESC "virtio GPU" +#define DRIVER_DATE "0" + +#define DRIVER_MAJOR 0 +#define DRIVER_MINOR 1 +#define DRIVER_PATCHLEVEL 0 + +#define STATE_INITIALIZING 0 +#define STATE_OK 1 +#define STATE_ERR 2 + +#define MAX_CAPSET_ID 63 +#define MAX_RINGS 64 + +struct virtio_gpu_object_params { + unsigned long size; + bool dumb; + /* 3d */ + bool virgl; + bool blob; + + /* classic resources only */ + uint32_t format; + uint32_t width; + uint32_t height; + uint32_t target; + uint32_t bind; + uint32_t depth; + uint32_t array_size; + uint32_t last_level; + uint32_t nr_samples; + uint32_t flags; + + /* blob resources only */ + uint32_t ctx_id; + uint32_t blob_mem; + uint32_t blob_flags; + uint64_t blob_id; +}; + +struct virtio_gpu_object { + struct drm_gem_shmem_object base; + uint32_t hw_res_handle; + bool dumb; + bool created; + bool host3d_blob, guest_blob; + uint32_t blob_mem, blob_flags; + + int uuid_state; + uuid_t uuid; +}; +#define gem_to_virtio_gpu_obj(gobj) \ + container_of((gobj), struct virtio_gpu_object, base.base) + +struct virtio_gpu_object_shmem { + struct virtio_gpu_object base; +}; + +struct virtio_gpu_object_vram { + struct virtio_gpu_object base; + uint32_t map_state; + uint32_t map_info; + struct drm_mm_node vram_node; +}; + +#define to_virtio_gpu_shmem(virtio_gpu_object) \ + container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base) + +#define to_virtio_gpu_vram(virtio_gpu_object) \ + container_of((virtio_gpu_object), struct virtio_gpu_object_vram, base) + +struct virtio_gpu_object_array { + struct ww_acquire_ctx ticket; + struct list_head next; + u32 nents, total; + struct drm_gem_object *objs[]; +}; + +struct virtio_gpu_vbuffer; +struct virtio_gpu_device; + +typedef void (*virtio_gpu_resp_cb)(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf); 
+ +struct virtio_gpu_fence_driver { + atomic64_t last_fence_id; + uint64_t current_fence_id; + uint64_t context; + struct list_head fences; + spinlock_t lock; +}; + +struct virtio_gpu_fence_event { + struct drm_pending_event base; + struct drm_event event; +}; + +struct virtio_gpu_fence { + struct dma_fence f; + uint32_t ring_idx; + uint64_t fence_id; + bool emit_fence_info; + struct virtio_gpu_fence_event *e; + struct virtio_gpu_fence_driver *drv; + struct list_head node; +}; + +struct virtio_gpu_vbuffer { + char *buf; + int size; + + void *data_buf; + uint32_t data_size; + + char *resp_buf; + int resp_size; + virtio_gpu_resp_cb resp_cb; + void *resp_cb_data; + + struct virtio_gpu_object_array *objs; + struct list_head list; +}; + +struct virtio_gpu_output { + int index; + struct drm_crtc crtc; + struct drm_connector conn; + struct drm_encoder enc; + struct virtio_gpu_display_one info; + struct virtio_gpu_update_cursor cursor; + struct edid *edid; + int cur_x; + int cur_y; + bool needs_modeset; +}; +#define drm_crtc_to_virtio_gpu_output(x) \ + container_of(x, struct virtio_gpu_output, crtc) + +struct virtio_gpu_framebuffer { + struct drm_framebuffer base; + struct virtio_gpu_fence *fence; +}; +#define to_virtio_gpu_framebuffer(x) \ + container_of(x, struct virtio_gpu_framebuffer, base) + +struct virtio_gpu_queue { + struct virtqueue *vq; + spinlock_t qlock; + wait_queue_head_t ack_queue; + struct work_struct dequeue_work; +}; + +struct virtio_gpu_drv_capset { + uint32_t id; + uint32_t max_version; + uint32_t max_size; +}; + +struct virtio_gpu_drv_cap_cache { + struct list_head head; + void *caps_cache; + uint32_t id; + uint32_t version; + uint32_t size; + atomic_t is_valid; +}; + +struct virtio_gpu_device { + struct drm_device *ddev; + + struct virtio_device *vdev; + + struct virtio_gpu_output outputs[VIRTIO_GPU_MAX_SCANOUTS]; + uint32_t num_scanouts; + + struct virtio_gpu_queue ctrlq; + struct virtio_gpu_queue cursorq; + struct kmem_cache *vbufs; + + atomic_t pending_commands; + + struct ida resource_ida; + + wait_queue_head_t resp_wq; + /* current display info */ + spinlock_t display_info_lock; + bool display_info_pending; + + struct virtio_gpu_fence_driver fence_drv; + + struct ida ctx_id_ida; + + bool has_virgl_3d; + bool has_edid; + bool has_indirect; + bool has_resource_assign_uuid; + bool has_resource_blob; + bool has_host_visible; + bool has_context_init; + struct virtio_shm_region host_visible_region; + struct drm_mm host_visible_mm; + + struct work_struct config_changed_work; + + struct work_struct obj_free_work; + spinlock_t obj_free_lock; + struct list_head obj_free_list; + + struct virtio_gpu_drv_capset *capsets; + uint32_t num_capsets; + uint64_t capset_id_mask; + struct list_head cap_cache; + + /* protects uuid state when exporting */ + spinlock_t resource_export_lock; + /* protects map state and host_visible_mm */ + spinlock_t host_visible_lock; +}; + +struct virtio_gpu_fpriv { + uint32_t ctx_id; + uint32_t context_init; + bool context_created; + uint32_t num_rings; + uint64_t base_fence_ctx; + uint64_t ring_idx_mask; + struct mutex context_lock; +}; + +/* virtgpu_ioctl.c */ +#define DRM_VIRTIO_NUM_IOCTLS 12 +extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS]; +void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file); + +/* virtgpu_kms.c */ +int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev); +void virtio_gpu_deinit(struct drm_device *dev); +void virtio_gpu_release(struct drm_device *dev); +int 
virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file); +void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file); + +/* virtgpu_gem.c */ +int virtio_gpu_gem_object_open(struct drm_gem_object *obj, + struct drm_file *file); +void virtio_gpu_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file); +int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); +int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle, uint64_t *offset_p); + +struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents); +struct virtio_gpu_object_array* +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents); +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs, + struct drm_gem_object *obj); +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, + struct dma_fence *fence); +void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs); +void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs); +void virtio_gpu_array_put_free_work(struct work_struct *work); + +/* virtgpu_vq.c */ +int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev); +void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev); +void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); +void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo); +void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, + uint64_t offset, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); +void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, + uint32_t resource_id, + uint32_t x, uint32_t y, + uint32_t width, uint32_t height, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); +void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, + uint32_t scanout_id, uint32_t resource_id, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y); +void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *obj, + struct virtio_gpu_mem_entry *ents, + unsigned int nents); +int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev); +int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev); +void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, + struct virtio_gpu_output *output); +int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev); +int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx); +int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev, + int idx, int version, + struct virtio_gpu_drv_cap_cache **cache_p); +int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev); +void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id, + uint32_t context_init, uint32_t nlen, + const char *name); +void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev, + uint32_t id); +void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev, + uint32_t ctx_id, + 
struct virtio_gpu_object_array *objs); +void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, + uint32_t ctx_id, + struct virtio_gpu_object_array *objs); +void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, + void *data, uint32_t data_size, + uint32_t ctx_id, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); +void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, + uint32_t ctx_id, + uint64_t offset, uint32_t level, + uint32_t stride, + uint32_t layer_stride, + struct drm_virtgpu_3d_box *box, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); +void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, + uint32_t ctx_id, + uint64_t offset, uint32_t level, + uint32_t stride, + uint32_t layer_stride, + struct drm_virtgpu_3d_box *box, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); +void +virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence); +void virtio_gpu_ctrl_ack(struct virtqueue *vq); +void virtio_gpu_cursor_ack(struct virtqueue *vq); +void virtio_gpu_fence_ack(struct virtqueue *vq); +void virtio_gpu_dequeue_ctrl_func(struct work_struct *work); +void virtio_gpu_dequeue_cursor_func(struct work_struct *work); +void virtio_gpu_dequeue_fence_func(struct work_struct *work); + +void virtio_gpu_notify(struct virtio_gpu_device *vgdev); + +int +virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs); + +int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs, uint64_t offset); + +void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo); + +void +virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_mem_entry *ents, + uint32_t nents); +void +virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev, + uint32_t scanout_id, + struct virtio_gpu_object *bo, + struct drm_framebuffer *fb, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y); + +/* virtgpu_display.c */ +int virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev); +void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev); + +/* virtgpu_plane.c */ +uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc); +struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, + enum drm_plane_type type, + int index); + +/* virtgpu_fence.c */ +struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev, + uint64_t base_fence_ctx, + uint32_t ring_idx); +void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, + struct virtio_gpu_ctrl_hdr *cmd_hdr, + struct virtio_gpu_fence *fence); +void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev, + u64 fence_id); + +/* virtgpu_object.c */ +void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo); +struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, + size_t size); +int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object **bo_ptr, + struct virtio_gpu_fence *fence); + +bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo); + +int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, + uint32_t *resid); +/* 
virtgpu_prime.c */ +int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo); +struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj, + int flags); +struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev, + struct dma_buf *buf); +int virtgpu_gem_prime_get_uuid(struct drm_gem_object *obj, + uuid_t *uuid); +struct drm_gem_object *virtgpu_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *sgt); + +/* virtgpu_debugfs.c */ +void virtio_gpu_debugfs_init(struct drm_minor *minor); + +/* virtgpu_vram.c */ +bool virtio_gpu_is_vram(struct virtio_gpu_object *bo); +int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object **bo_ptr); +struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo, + struct device *dev, + enum dma_data_direction dir); +void virtio_gpu_vram_unmap_dma_buf(struct device *dev, + struct sg_table *sgt, + enum dma_data_direction dir); + +#endif diff --git a/drivers/gpu/drm/virtio/virtgpu_fence.c b/drivers/gpu/drm/virtio/virtgpu_fence.c new file mode 100644 index 000000000..f28357dbd --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_fence.c @@ -0,0 +1,175 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <trace/events/dma_fence.h> + +#include "virtgpu_drv.h" + +#define to_virtio_gpu_fence(x) \ + container_of(x, struct virtio_gpu_fence, f) + +static const char *virtio_gpu_get_driver_name(struct dma_fence *f) +{ + return "virtio_gpu"; +} + +static const char *virtio_gpu_get_timeline_name(struct dma_fence *f) +{ + return "controlq"; +} + +static bool virtio_gpu_fence_signaled(struct dma_fence *f) +{ + /* leaked fence outside driver before completing + * initialization with virtio_gpu_fence_emit. 
+ */ + WARN_ON_ONCE(f->seqno == 0); + return false; +} + +static void virtio_gpu_fence_value_str(struct dma_fence *f, char *str, int size) +{ + snprintf(str, size, "[%llu, %llu]", f->context, f->seqno); +} + +static void virtio_gpu_timeline_value_str(struct dma_fence *f, char *str, + int size) +{ + struct virtio_gpu_fence *fence = to_virtio_gpu_fence(f); + + snprintf(str, size, "%llu", + (u64)atomic64_read(&fence->drv->last_fence_id)); +} + +static const struct dma_fence_ops virtio_gpu_fence_ops = { + .get_driver_name = virtio_gpu_get_driver_name, + .get_timeline_name = virtio_gpu_get_timeline_name, + .signaled = virtio_gpu_fence_signaled, + .fence_value_str = virtio_gpu_fence_value_str, + .timeline_value_str = virtio_gpu_timeline_value_str, +}; + +struct virtio_gpu_fence *virtio_gpu_fence_alloc(struct virtio_gpu_device *vgdev, + uint64_t base_fence_ctx, + uint32_t ring_idx) +{ + uint64_t fence_context = base_fence_ctx + ring_idx; + struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; + struct virtio_gpu_fence *fence = kzalloc(sizeof(struct virtio_gpu_fence), + GFP_KERNEL); + + if (!fence) + return fence; + + fence->drv = drv; + fence->ring_idx = ring_idx; + fence->emit_fence_info = !(base_fence_ctx == drv->context); + + /* This only partially initializes the fence because the seqno is + * unknown yet. The fence must not be used outside of the driver + * until virtio_gpu_fence_emit is called. + */ + + dma_fence_init(&fence->f, &virtio_gpu_fence_ops, &drv->lock, + fence_context, 0); + + return fence; +} + +void virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev, + struct virtio_gpu_ctrl_hdr *cmd_hdr, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; + unsigned long irq_flags; + + spin_lock_irqsave(&drv->lock, irq_flags); + fence->fence_id = fence->f.seqno = ++drv->current_fence_id; + dma_fence_get(&fence->f); + list_add_tail(&fence->node, &drv->fences); + spin_unlock_irqrestore(&drv->lock, irq_flags); + + trace_dma_fence_emit(&fence->f); + + cmd_hdr->flags |= cpu_to_le32(VIRTIO_GPU_FLAG_FENCE); + cmd_hdr->fence_id = cpu_to_le64(fence->fence_id); + + /* Only currently defined fence param. */ + if (fence->emit_fence_info) { + cmd_hdr->flags |= + cpu_to_le32(VIRTIO_GPU_FLAG_INFO_RING_IDX); + cmd_hdr->ring_idx = (u8)fence->ring_idx; + } +} + +void virtio_gpu_fence_event_process(struct virtio_gpu_device *vgdev, + u64 fence_id) +{ + struct virtio_gpu_fence_driver *drv = &vgdev->fence_drv; + struct virtio_gpu_fence *signaled, *curr, *tmp; + unsigned long irq_flags; + + spin_lock_irqsave(&drv->lock, irq_flags); + atomic64_set(&vgdev->fence_drv.last_fence_id, fence_id); + list_for_each_entry_safe(curr, tmp, &drv->fences, node) { + if (fence_id != curr->fence_id) + continue; + + signaled = curr; + + /* + * Signal any fences with a strictly smaller sequence number + * than the current signaled fence. 
+ */ + list_for_each_entry_safe(curr, tmp, &drv->fences, node) { + /* dma-fence contexts must match */ + if (signaled->f.context != curr->f.context) + continue; + + if (!dma_fence_is_later(&signaled->f, &curr->f)) + continue; + + dma_fence_signal_locked(&curr->f); + if (curr->e) { + drm_send_event(vgdev->ddev, &curr->e->base); + curr->e = NULL; + } + + list_del(&curr->node); + dma_fence_put(&curr->f); + } + + dma_fence_signal_locked(&signaled->f); + if (signaled->e) { + drm_send_event(vgdev->ddev, &signaled->e->base); + signaled->e = NULL; + } + + list_del(&signaled->node); + dma_fence_put(&signaled->f); + break; + } + spin_unlock_irqrestore(&drv->lock, irq_flags); +} diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c new file mode 100644 index 000000000..7db48d17e --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_gem.c @@ -0,0 +1,296 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include <drm/drm_file.h> +#include <drm/drm_fourcc.h> + +#include "virtgpu_drv.h" + +static int virtio_gpu_gem_create(struct drm_file *file, + struct drm_device *dev, + struct virtio_gpu_object_params *params, + struct drm_gem_object **obj_p, + uint32_t *handle_p) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_object *obj; + int ret; + u32 handle; + + ret = virtio_gpu_object_create(vgdev, params, &obj, NULL); + if (ret < 0) + return ret; + + ret = drm_gem_handle_create(file, &obj->base.base, &handle); + if (ret) { + drm_gem_object_release(&obj->base.base); + return ret; + } + + *obj_p = &obj->base.base; + + /* drop reference from allocate - handle holds it now */ + drm_gem_object_put(&obj->base.base); + + *handle_p = handle; + return 0; +} + +int virtio_gpu_mode_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + struct drm_gem_object *gobj; + struct virtio_gpu_object_params params = { 0 }; + struct virtio_gpu_device *vgdev = dev->dev_private; + int ret; + uint32_t pitch; + + if (args->bpp != 32) + return -EINVAL; + + pitch = args->width * 4; + args->size = pitch * args->height; + args->size = ALIGN(args->size, PAGE_SIZE); + + params.format = virtio_gpu_translate_format(DRM_FORMAT_HOST_XRGB8888); + params.width = args->width; + params.height = args->height; + params.size = args->size; + params.dumb = true; + + if (vgdev->has_resource_blob && !vgdev->has_virgl_3d) { + params.blob_mem = VIRTGPU_BLOB_MEM_GUEST; + params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE; + params.blob = true; + } + + ret = virtio_gpu_gem_create(file_priv, dev, ¶ms, &gobj, + &args->handle); + if (ret) + goto fail; + + args->pitch = pitch; + return ret; + +fail: + return ret; +} + +int virtio_gpu_mode_dumb_mmap(struct drm_file *file_priv, + struct drm_device *dev, + uint32_t handle, uint64_t *offset_p) +{ + struct drm_gem_object *gobj; + + BUG_ON(!offset_p); + gobj = drm_gem_object_lookup(file_priv, handle); + if (gobj == NULL) + return -ENOENT; + *offset_p = drm_vma_node_offset_addr(&gobj->vma_node); + drm_gem_object_put(gobj); + return 0; +} + +int virtio_gpu_gem_object_open(struct drm_gem_object *obj, + struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = obj->dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + struct virtio_gpu_object_array *objs; + + if (!vgdev->has_virgl_3d) + goto out_notify; + + /* the context might still be missing when the first ioctl is + * DRM_IOCTL_MODE_CREATE_DUMB or DRM_IOCTL_PRIME_FD_TO_HANDLE + */ + virtio_gpu_create_context(obj->dev, file); + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return -ENOMEM; + virtio_gpu_array_add_obj(objs, obj); + + virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id, + objs); +out_notify: + virtio_gpu_notify(vgdev); + return 0; +} + +void virtio_gpu_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = obj->dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + struct virtio_gpu_object_array *objs; + + if (!vgdev->has_virgl_3d) + return; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, obj); + + virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id, + objs); + virtio_gpu_notify(vgdev); +} + +struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents) +{ + struct virtio_gpu_object_array *objs; + + objs = kmalloc(struct_size(objs, objs, nents), GFP_KERNEL); + if (!objs) + return NULL; 
+ + objs->nents = 0; + objs->total = nents; + return objs; +} + +static void virtio_gpu_array_free(struct virtio_gpu_object_array *objs) +{ + kfree(objs); +} + +struct virtio_gpu_object_array* +virtio_gpu_array_from_handles(struct drm_file *drm_file, u32 *handles, u32 nents) +{ + struct virtio_gpu_object_array *objs; + u32 i; + + objs = virtio_gpu_array_alloc(nents); + if (!objs) + return NULL; + + for (i = 0; i < nents; i++) { + objs->objs[i] = drm_gem_object_lookup(drm_file, handles[i]); + if (!objs->objs[i]) { + objs->nents = i; + virtio_gpu_array_put_free(objs); + return NULL; + } + } + objs->nents = i; + return objs; +} + +void virtio_gpu_array_add_obj(struct virtio_gpu_object_array *objs, + struct drm_gem_object *obj) +{ + if (WARN_ON_ONCE(objs->nents == objs->total)) + return; + + drm_gem_object_get(obj); + objs->objs[objs->nents] = obj; + objs->nents++; +} + +int virtio_gpu_array_lock_resv(struct virtio_gpu_object_array *objs) +{ + unsigned int i; + int ret; + + if (objs->nents == 1) { + ret = dma_resv_lock_interruptible(objs->objs[0]->resv, NULL); + } else { + ret = drm_gem_lock_reservations(objs->objs, objs->nents, + &objs->ticket); + } + if (ret) + return ret; + + for (i = 0; i < objs->nents; ++i) { + ret = dma_resv_reserve_fences(objs->objs[i]->resv, 1); + if (ret) { + virtio_gpu_array_unlock_resv(objs); + return ret; + } + } + return ret; +} + +void virtio_gpu_array_unlock_resv(struct virtio_gpu_object_array *objs) +{ + if (objs->nents == 1) { + dma_resv_unlock(objs->objs[0]->resv); + } else { + drm_gem_unlock_reservations(objs->objs, objs->nents, + &objs->ticket); + } +} + +void virtio_gpu_array_add_fence(struct virtio_gpu_object_array *objs, + struct dma_fence *fence) +{ + int i; + + for (i = 0; i < objs->nents; i++) + dma_resv_add_fence(objs->objs[i]->resv, fence, + DMA_RESV_USAGE_WRITE); +} + +void virtio_gpu_array_put_free(struct virtio_gpu_object_array *objs) +{ + u32 i; + + if (!objs) + return; + + for (i = 0; i < objs->nents; i++) + drm_gem_object_put(objs->objs[i]); + virtio_gpu_array_free(objs); +} + +void virtio_gpu_array_put_free_delayed(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs) +{ + spin_lock(&vgdev->obj_free_lock); + list_add_tail(&objs->next, &vgdev->obj_free_list); + spin_unlock(&vgdev->obj_free_lock); + schedule_work(&vgdev->obj_free_work); +} + +void virtio_gpu_array_put_free_work(struct work_struct *work) +{ + struct virtio_gpu_device *vgdev = + container_of(work, struct virtio_gpu_device, obj_free_work); + struct virtio_gpu_object_array *objs; + + spin_lock(&vgdev->obj_free_lock); + while (!list_empty(&vgdev->obj_free_list)) { + objs = list_first_entry(&vgdev->obj_free_list, + struct virtio_gpu_object_array, next); + list_del(&objs->next); + spin_unlock(&vgdev->obj_free_lock); + virtio_gpu_array_put_free(objs); + spin_lock(&vgdev->obj_free_lock); + } + spin_unlock(&vgdev->obj_free_lock); +} diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c new file mode 100644 index 000000000..bc8c1e9a8 --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c @@ -0,0 +1,898 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. 
+ * + * Authors: + * Dave Airlie + * Alon Levy + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/file.h> +#include <linux/sync_file.h> +#include <linux/uaccess.h> + +#include <drm/drm_file.h> +#include <drm/virtgpu_drm.h> + +#include "virtgpu_drv.h" + +#define VIRTGPU_BLOB_FLAG_USE_MASK (VIRTGPU_BLOB_FLAG_USE_MAPPABLE | \ + VIRTGPU_BLOB_FLAG_USE_SHAREABLE | \ + VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) + +static int virtio_gpu_fence_event_create(struct drm_device *dev, + struct drm_file *file, + struct virtio_gpu_fence *fence, + uint32_t ring_idx) +{ + struct virtio_gpu_fence_event *e = NULL; + int ret; + + e = kzalloc(sizeof(*e), GFP_KERNEL); + if (!e) + return -ENOMEM; + + e->event.type = VIRTGPU_EVENT_FENCE_SIGNALED; + e->event.length = sizeof(e->event); + + ret = drm_event_reserve_init(dev, file, &e->base, &e->event); + if (ret) + goto free; + + fence->e = e; + return 0; +free: + kfree(e); + return ret; +} + +/* Must be called with &virtio_gpu_fpriv.struct_mutex held. */ +static void virtio_gpu_create_context_locked(struct virtio_gpu_device *vgdev, + struct virtio_gpu_fpriv *vfpriv) +{ + char dbgname[TASK_COMM_LEN]; + + get_task_comm(dbgname, current); + virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id, + vfpriv->context_init, strlen(dbgname), + dbgname); + + vfpriv->context_created = true; +} + +void virtio_gpu_create_context(struct drm_device *dev, struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + + mutex_lock(&vfpriv->context_lock); + if (vfpriv->context_created) + goto out_unlock; + + virtio_gpu_create_context_locked(vgdev, vfpriv); + +out_unlock: + mutex_unlock(&vfpriv->context_lock); +} + +static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct drm_virtgpu_map *virtio_gpu_map = data; + + return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev, + virtio_gpu_map->handle, + &virtio_gpu_map->offset); +} + +/* + * Usage of execbuffer: + * Relocations need to take into account the full VIRTIO_GPUDrawable size. 
+ * However, the command as passed from user space must *not* contain the initial + * VIRTIO_GPUReleaseInfo struct (first XXX bytes) + */ +static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_virtgpu_execbuffer *exbuf = data; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + struct virtio_gpu_fence *out_fence; + bool drm_fence_event; + int ret; + uint32_t *bo_handles = NULL; + void __user *user_bo_handles = NULL; + struct virtio_gpu_object_array *buflist = NULL; + struct sync_file *sync_file; + int out_fence_fd = -1; + void *buf; + uint64_t fence_ctx; + uint32_t ring_idx; + + fence_ctx = vgdev->fence_drv.context; + ring_idx = 0; + + if (vgdev->has_virgl_3d == false) + return -ENOSYS; + + if ((exbuf->flags & ~VIRTGPU_EXECBUF_FLAGS)) + return -EINVAL; + + if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX)) { + if (exbuf->ring_idx >= vfpriv->num_rings) + return -EINVAL; + + if (!vfpriv->base_fence_ctx) + return -EINVAL; + + fence_ctx = vfpriv->base_fence_ctx; + ring_idx = exbuf->ring_idx; + } + + virtio_gpu_create_context(dev, file); + if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) { + struct dma_fence *in_fence; + + in_fence = sync_file_get_fence(exbuf->fence_fd); + + if (!in_fence) + return -EINVAL; + + /* + * Wait if the fence is from a foreign context, or if the fence + * array contains any fence from a foreign context. + */ + ret = 0; + if (!dma_fence_match_context(in_fence, fence_ctx + ring_idx)) + ret = dma_fence_wait(in_fence, true); + + dma_fence_put(in_fence); + if (ret) + return ret; + } + + if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) { + out_fence_fd = get_unused_fd_flags(O_CLOEXEC); + if (out_fence_fd < 0) + return out_fence_fd; + } + + if (exbuf->num_bo_handles) { + bo_handles = kvmalloc_array(exbuf->num_bo_handles, + sizeof(uint32_t), GFP_KERNEL); + if (!bo_handles) { + ret = -ENOMEM; + goto out_unused_fd; + } + + user_bo_handles = u64_to_user_ptr(exbuf->bo_handles); + if (copy_from_user(bo_handles, user_bo_handles, + exbuf->num_bo_handles * sizeof(uint32_t))) { + ret = -EFAULT; + goto out_unused_fd; + } + + buflist = virtio_gpu_array_from_handles(file, bo_handles, + exbuf->num_bo_handles); + if (!buflist) { + ret = -ENOENT; + goto out_unused_fd; + } + kvfree(bo_handles); + bo_handles = NULL; + } + + buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size); + if (IS_ERR(buf)) { + ret = PTR_ERR(buf); + goto out_unused_fd; + } + + if (buflist) { + ret = virtio_gpu_array_lock_resv(buflist); + if (ret) + goto out_memdup; + } + + if ((exbuf->flags & VIRTGPU_EXECBUF_RING_IDX) && + (vfpriv->ring_idx_mask & BIT_ULL(ring_idx))) + drm_fence_event = true; + else + drm_fence_event = false; + + if ((exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_OUT) || + exbuf->num_bo_handles || + drm_fence_event) + out_fence = virtio_gpu_fence_alloc(vgdev, fence_ctx, ring_idx); + else + out_fence = NULL; + + if (drm_fence_event) { + ret = virtio_gpu_fence_event_create(dev, file, out_fence, ring_idx); + if (ret) + goto out_unresv; + } + + if (out_fence_fd >= 0) { + sync_file = sync_file_create(&out_fence->f); + if (!sync_file) { + dma_fence_put(&out_fence->f); + ret = -ENOMEM; + goto out_unresv; + } + + exbuf->fence_fd = out_fence_fd; + fd_install(out_fence_fd, sync_file->file); + } + + virtio_gpu_cmd_submit(vgdev, buf, exbuf->size, + vfpriv->ctx_id, buflist, out_fence); + dma_fence_put(&out_fence->f); + virtio_gpu_notify(vgdev); + return 0; + +out_unresv: + if (buflist) + 
virtio_gpu_array_unlock_resv(buflist); +out_memdup: + kvfree(buf); +out_unused_fd: + kvfree(bo_handles); + if (buflist) + virtio_gpu_array_put_free(buflist); + + if (out_fence_fd >= 0) + put_unused_fd(out_fence_fd); + + return ret; +} + +static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct drm_virtgpu_getparam *param = data; + int value; + + switch (param->param) { + case VIRTGPU_PARAM_3D_FEATURES: + value = vgdev->has_virgl_3d ? 1 : 0; + break; + case VIRTGPU_PARAM_CAPSET_QUERY_FIX: + value = 1; + break; + case VIRTGPU_PARAM_RESOURCE_BLOB: + value = vgdev->has_resource_blob ? 1 : 0; + break; + case VIRTGPU_PARAM_HOST_VISIBLE: + value = vgdev->has_host_visible ? 1 : 0; + break; + case VIRTGPU_PARAM_CROSS_DEVICE: + value = vgdev->has_resource_assign_uuid ? 1 : 0; + break; + case VIRTGPU_PARAM_CONTEXT_INIT: + value = vgdev->has_context_init ? 1 : 0; + break; + case VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs: + value = vgdev->capset_id_mask; + break; + default: + return -EINVAL; + } + if (copy_to_user(u64_to_user_ptr(param->value), &value, sizeof(int))) + return -EFAULT; + + return 0; +} + +static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct drm_virtgpu_resource_create *rc = data; + struct virtio_gpu_fence *fence; + int ret; + struct virtio_gpu_object *qobj; + struct drm_gem_object *obj; + uint32_t handle = 0; + struct virtio_gpu_object_params params = { 0 }; + + if (vgdev->has_virgl_3d) { + virtio_gpu_create_context(dev, file); + params.virgl = true; + params.target = rc->target; + params.bind = rc->bind; + params.depth = rc->depth; + params.array_size = rc->array_size; + params.last_level = rc->last_level; + params.nr_samples = rc->nr_samples; + params.flags = rc->flags; + } else { + if (rc->depth > 1) + return -EINVAL; + if (rc->nr_samples > 1) + return -EINVAL; + if (rc->last_level > 1) + return -EINVAL; + if (rc->target != 2) + return -EINVAL; + if (rc->array_size > 1) + return -EINVAL; + } + + params.format = rc->format; + params.width = rc->width; + params.height = rc->height; + params.size = rc->size; + /* allocate a single page size object */ + if (params.size == 0) + params.size = PAGE_SIZE; + + fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0); + if (!fence) + return -ENOMEM; + ret = virtio_gpu_object_create(vgdev, ¶ms, &qobj, fence); + dma_fence_put(&fence->f); + if (ret < 0) + return ret; + obj = &qobj->base.base; + + ret = drm_gem_handle_create(file, obj, &handle); + if (ret) { + drm_gem_object_release(obj); + return ret; + } + + rc->res_handle = qobj->hw_res_handle; /* similiar to a VM address */ + rc->bo_handle = handle; + + /* + * The handle owns the reference now. But we must drop our + * remaining reference *after* we no longer need to dereference + * the obj. Otherwise userspace could guess the handle and + * race closing it from another thread. 
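Of the ioctls above, VIRTGPU_GETPARAM is the simplest and is typically the first call a user-space driver makes to probe host features. A hedged user-space sketch; the render-node path and the libdrm header location are assumptions about the local setup, and the kernel writes back a 32-bit value through the pointer passed in 'value':

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <libdrm/virtgpu_drm.h>	/* pulls in drm.h; exact path depends on the install */

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed render node */
	int has_3d = 0;
	struct drm_virtgpu_getparam gp = {
		.param = VIRTGPU_PARAM_3D_FEATURES,
		.value = (uint64_t)(uintptr_t)&has_3d,	/* kernel copies sizeof(int) here */
	};

	if (fd < 0)
		return 1;
	if (ioctl(fd, DRM_IOCTL_VIRTGPU_GETPARAM, &gp) != 0) {
		perror("DRM_IOCTL_VIRTGPU_GETPARAM");
		return 1;
	}
	printf("virgl 3D support: %s\n", has_3d ? "yes" : "no");
	return 0;
}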
+ */ + drm_gem_object_put(obj); + + return 0; +} + +static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_virtgpu_resource_info *ri = data; + struct drm_gem_object *gobj = NULL; + struct virtio_gpu_object *qobj = NULL; + + gobj = drm_gem_object_lookup(file, ri->bo_handle); + if (gobj == NULL) + return -ENOENT; + + qobj = gem_to_virtio_gpu_obj(gobj); + + ri->size = qobj->base.base.size; + ri->res_handle = qobj->hw_res_handle; + if (qobj->host3d_blob || qobj->guest_blob) + ri->blob_mem = qobj->blob_mem; + + drm_gem_object_put(gobj); + return 0; +} + +static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + struct drm_virtgpu_3d_transfer_from_host *args = data; + struct virtio_gpu_object *bo; + struct virtio_gpu_object_array *objs; + struct virtio_gpu_fence *fence; + int ret; + u32 offset = args->offset; + + if (vgdev->has_virgl_3d == false) + return -ENOSYS; + + virtio_gpu_create_context(dev, file); + objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1); + if (objs == NULL) + return -ENOENT; + + bo = gem_to_virtio_gpu_obj(objs->objs[0]); + if (bo->guest_blob && !bo->host3d_blob) { + ret = -EINVAL; + goto err_put_free; + } + + if (!bo->host3d_blob && (args->stride || args->layer_stride)) { + ret = -EINVAL; + goto err_put_free; + } + + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_free; + + fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, 0); + if (!fence) { + ret = -ENOMEM; + goto err_unlock; + } + + virtio_gpu_cmd_transfer_from_host_3d + (vgdev, vfpriv->ctx_id, offset, args->level, args->stride, + args->layer_stride, &args->box, objs, fence); + dma_fence_put(&fence->f); + virtio_gpu_notify(vgdev); + return 0; + +err_unlock: + virtio_gpu_array_unlock_resv(objs); +err_put_free: + virtio_gpu_array_put_free(objs); + return ret; +} + +static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + struct drm_virtgpu_3d_transfer_to_host *args = data; + struct virtio_gpu_object *bo; + struct virtio_gpu_object_array *objs; + struct virtio_gpu_fence *fence; + int ret; + u32 offset = args->offset; + + objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1); + if (objs == NULL) + return -ENOENT; + + bo = gem_to_virtio_gpu_obj(objs->objs[0]); + if (bo->guest_blob && !bo->host3d_blob) { + ret = -EINVAL; + goto err_put_free; + } + + if (!vgdev->has_virgl_3d) { + virtio_gpu_cmd_transfer_to_host_2d + (vgdev, offset, + args->box.w, args->box.h, args->box.x, args->box.y, + objs, NULL); + } else { + virtio_gpu_create_context(dev, file); + + if (!bo->host3d_blob && (args->stride || args->layer_stride)) { + ret = -EINVAL; + goto err_put_free; + } + + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_free; + + ret = -ENOMEM; + fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, + 0); + if (!fence) + goto err_unlock; + + virtio_gpu_cmd_transfer_to_host_3d + (vgdev, + vfpriv ? 
vfpriv->ctx_id : 0, offset, args->level, + args->stride, args->layer_stride, &args->box, objs, + fence); + dma_fence_put(&fence->f); + } + virtio_gpu_notify(vgdev); + return 0; + +err_unlock: + virtio_gpu_array_unlock_resv(objs); +err_put_free: + virtio_gpu_array_put_free(objs); + return ret; +} + +static int virtio_gpu_wait_ioctl(struct drm_device *dev, void *data, + struct drm_file *file) +{ + struct drm_virtgpu_3d_wait *args = data; + struct drm_gem_object *obj; + long timeout = 15 * HZ; + int ret; + + obj = drm_gem_object_lookup(file, args->handle); + if (obj == NULL) + return -ENOENT; + + if (args->flags & VIRTGPU_WAIT_NOWAIT) { + ret = dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ); + } else { + ret = dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, + true, timeout); + } + if (ret == 0) + ret = -EBUSY; + else if (ret > 0) + ret = 0; + + drm_gem_object_put(obj); + return ret; +} + +static int virtio_gpu_get_caps_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct drm_virtgpu_get_caps *args = data; + unsigned size, host_caps_size; + int i; + int found_valid = -1; + int ret; + struct virtio_gpu_drv_cap_cache *cache_ent; + void *ptr; + + if (vgdev->num_capsets == 0) + return -ENOSYS; + + /* don't allow userspace to pass 0 */ + if (args->size == 0) + return -EINVAL; + + spin_lock(&vgdev->display_info_lock); + for (i = 0; i < vgdev->num_capsets; i++) { + if (vgdev->capsets[i].id == args->cap_set_id) { + if (vgdev->capsets[i].max_version >= args->cap_set_ver) { + found_valid = i; + break; + } + } + } + + if (found_valid == -1) { + spin_unlock(&vgdev->display_info_lock); + return -EINVAL; + } + + host_caps_size = vgdev->capsets[found_valid].max_size; + /* only copy to user the minimum of the host caps size or the guest caps size */ + size = min(args->size, host_caps_size); + + list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { + if (cache_ent->id == args->cap_set_id && + cache_ent->version == args->cap_set_ver) { + spin_unlock(&vgdev->display_info_lock); + goto copy_exit; + } + } + spin_unlock(&vgdev->display_info_lock); + + /* not in cache - need to talk to hw */ + ret = virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver, + &cache_ent); + if (ret) + return ret; + virtio_gpu_notify(vgdev); + +copy_exit: + ret = wait_event_timeout(vgdev->resp_wq, + atomic_read(&cache_ent->is_valid), 5 * HZ); + if (!ret) + return -EBUSY; + + /* is_valid check must proceed before copy of the cache entry. */ + smp_rmb(); + + ptr = cache_ent->caps_cache; + + if (copy_to_user(u64_to_user_ptr(args->addr), ptr, size)) + return -EFAULT; + + return 0; +} + +static int verify_blob(struct virtio_gpu_device *vgdev, + struct virtio_gpu_fpriv *vfpriv, + struct virtio_gpu_object_params *params, + struct drm_virtgpu_resource_create_blob *rc_blob, + bool *guest_blob, bool *host3d_blob) +{ + if (!vgdev->has_resource_blob) + return -EINVAL; + + if (rc_blob->blob_flags & ~VIRTGPU_BLOB_FLAG_USE_MASK) + return -EINVAL; + + if (rc_blob->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) { + if (!vgdev->has_resource_assign_uuid) + return -EINVAL; + } + + switch (rc_blob->blob_mem) { + case VIRTGPU_BLOB_MEM_GUEST: + *guest_blob = true; + break; + case VIRTGPU_BLOB_MEM_HOST3D_GUEST: + *guest_blob = true; + fallthrough; + case VIRTGPU_BLOB_MEM_HOST3D: + *host3d_blob = true; + break; + default: + return -EINVAL; + } + + if (*host3d_blob) { + if (!vgdev->has_virgl_3d) + return -EINVAL; + + /* Must be dword aligned. 
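The wait ioctl above simply forwards to the BO's reservation object, so from user space it serves both as a blocking wait and, with VIRTGPU_WAIT_NOWAIT, as a non-blocking busy probe. A sketch of a small helper (the helper name is hypothetical; header caveats as in the earlier example):

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <libdrm/virtgpu_drm.h>

/* Returns 0 when the BO is idle, -EBUSY while it is still busy (immediately
 * with nowait, or once the kernel's 15 second timeout elapses), another
 * negative errno value on error. */
int virtgpu_bo_wait(int drm_fd, uint32_t bo_handle, bool nowait)
{
	struct drm_virtgpu_3d_wait wait = {
		.handle = bo_handle,
		.flags  = nowait ? VIRTGPU_WAIT_NOWAIT : 0,
	};

	if (ioctl(drm_fd, DRM_IOCTL_VIRTGPU_WAIT, &wait) == 0)
		return 0;
	return -errno;
}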
*/ + if (rc_blob->cmd_size % 4 != 0) + return -EINVAL; + + params->ctx_id = vfpriv->ctx_id; + params->blob_id = rc_blob->blob_id; + } else { + if (rc_blob->blob_id != 0) + return -EINVAL; + + if (rc_blob->cmd_size != 0) + return -EINVAL; + } + + params->blob_mem = rc_blob->blob_mem; + params->size = rc_blob->size; + params->blob = true; + params->blob_flags = rc_blob->blob_flags; + return 0; +} + +static int virtio_gpu_resource_create_blob_ioctl(struct drm_device *dev, + void *data, + struct drm_file *file) +{ + int ret = 0; + uint32_t handle = 0; + bool guest_blob = false; + bool host3d_blob = false; + struct drm_gem_object *obj; + struct virtio_gpu_object *bo; + struct virtio_gpu_object_params params = { 0 }; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + struct drm_virtgpu_resource_create_blob *rc_blob = data; + + if (verify_blob(vgdev, vfpriv, ¶ms, rc_blob, + &guest_blob, &host3d_blob)) + return -EINVAL; + + if (vgdev->has_virgl_3d) + virtio_gpu_create_context(dev, file); + + if (rc_blob->cmd_size) { + void *buf; + + buf = memdup_user(u64_to_user_ptr(rc_blob->cmd), + rc_blob->cmd_size); + + if (IS_ERR(buf)) + return PTR_ERR(buf); + + virtio_gpu_cmd_submit(vgdev, buf, rc_blob->cmd_size, + vfpriv->ctx_id, NULL, NULL); + } + + if (guest_blob) + ret = virtio_gpu_object_create(vgdev, ¶ms, &bo, NULL); + else if (!guest_blob && host3d_blob) + ret = virtio_gpu_vram_create(vgdev, ¶ms, &bo); + else + return -EINVAL; + + if (ret < 0) + return ret; + + bo->guest_blob = guest_blob; + bo->host3d_blob = host3d_blob; + bo->blob_mem = rc_blob->blob_mem; + bo->blob_flags = rc_blob->blob_flags; + + obj = &bo->base.base; + if (params.blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE) { + ret = virtio_gpu_resource_assign_uuid(vgdev, bo); + if (ret) { + drm_gem_object_release(obj); + return ret; + } + } + + ret = drm_gem_handle_create(file, obj, &handle); + if (ret) { + drm_gem_object_release(obj); + return ret; + } + + rc_blob->res_handle = bo->hw_res_handle; + rc_blob->bo_handle = handle; + + /* + * The handle owns the reference now. But we must drop our + * remaining reference *after* we no longer need to dereference + * the obj. Otherwise userspace could guess the handle and + * race closing it from another thread. + */ + drm_gem_object_put(obj); + + return 0; +} + +static int virtio_gpu_context_init_ioctl(struct drm_device *dev, + void *data, struct drm_file *file) +{ + int ret = 0; + uint32_t num_params, i, param, value; + uint64_t valid_ring_mask; + size_t len; + struct drm_virtgpu_context_set_param *ctx_set_params = NULL; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + struct drm_virtgpu_context_init *args = data; + + num_params = args->num_params; + len = num_params * sizeof(struct drm_virtgpu_context_set_param); + + if (!vgdev->has_context_init || !vgdev->has_virgl_3d) + return -EINVAL; + + /* Number of unique parameters supported at this time. 
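For the blob path just validated, the minimal user-space request is a guest-memory blob: it only needs a size and the mappable flag, and everything else in the struct stays zero (blob_id and cmd_size must be zero for VIRTGPU_BLOB_MEM_GUEST, as checked above). A sketch; the helper name and the 4 KiB size are illustrative:

#include <stdio.h>
#include <sys/ioctl.h>
#include <libdrm/virtgpu_drm.h>

int create_guest_blob(int drm_fd)
{
	struct drm_virtgpu_resource_create_blob blob = {
		.blob_mem   = VIRTGPU_BLOB_MEM_GUEST,
		.blob_flags = VIRTGPU_BLOB_FLAG_USE_MAPPABLE,
		.size       = 4096,
	};

	if (ioctl(drm_fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE_BLOB, &blob) != 0)
		return -1;

	/* blob.bo_handle is the GEM handle, blob.res_handle the host resource id */
	printf("bo %u, resource %u\n", blob.bo_handle, blob.res_handle);
	return 0;
}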
*/ + if (num_params > 3) + return -EINVAL; + + ctx_set_params = memdup_user(u64_to_user_ptr(args->ctx_set_params), + len); + + if (IS_ERR(ctx_set_params)) + return PTR_ERR(ctx_set_params); + + mutex_lock(&vfpriv->context_lock); + if (vfpriv->context_created) { + ret = -EEXIST; + goto out_unlock; + } + + for (i = 0; i < num_params; i++) { + param = ctx_set_params[i].param; + value = ctx_set_params[i].value; + + switch (param) { + case VIRTGPU_CONTEXT_PARAM_CAPSET_ID: + if (value > MAX_CAPSET_ID) { + ret = -EINVAL; + goto out_unlock; + } + + if ((vgdev->capset_id_mask & (1ULL << value)) == 0) { + ret = -EINVAL; + goto out_unlock; + } + + /* Context capset ID already set */ + if (vfpriv->context_init & + VIRTIO_GPU_CONTEXT_INIT_CAPSET_ID_MASK) { + ret = -EINVAL; + goto out_unlock; + } + + vfpriv->context_init |= value; + break; + case VIRTGPU_CONTEXT_PARAM_NUM_RINGS: + if (vfpriv->base_fence_ctx) { + ret = -EINVAL; + goto out_unlock; + } + + if (value > MAX_RINGS) { + ret = -EINVAL; + goto out_unlock; + } + + vfpriv->base_fence_ctx = dma_fence_context_alloc(value); + vfpriv->num_rings = value; + break; + case VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK: + if (vfpriv->ring_idx_mask) { + ret = -EINVAL; + goto out_unlock; + } + + vfpriv->ring_idx_mask = value; + break; + default: + ret = -EINVAL; + goto out_unlock; + } + } + + if (vfpriv->ring_idx_mask) { + valid_ring_mask = 0; + for (i = 0; i < vfpriv->num_rings; i++) + valid_ring_mask |= 1ULL << i; + + if (~valid_ring_mask & vfpriv->ring_idx_mask) { + ret = -EINVAL; + goto out_unlock; + } + } + + virtio_gpu_create_context_locked(vgdev, vfpriv); + virtio_gpu_notify(vgdev); + +out_unlock: + mutex_unlock(&vfpriv->context_lock); + kfree(ctx_set_params); + return ret; +} + +struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS] = { + DRM_IOCTL_DEF_DRV(VIRTGPU_MAP, virtio_gpu_map_ioctl, + DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF_DRV(VIRTGPU_EXECBUFFER, virtio_gpu_execbuffer_ioctl, + DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF_DRV(VIRTGPU_GETPARAM, virtio_gpu_getparam_ioctl, + DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE, + virtio_gpu_resource_create_ioctl, + DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_INFO, virtio_gpu_resource_info_ioctl, + DRM_RENDER_ALLOW), + + /* make transfer async to the main ring? - no sure, can we + * thread these in the underlying GL + */ + DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_FROM_HOST, + virtio_gpu_transfer_from_host_ioctl, + DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(VIRTGPU_TRANSFER_TO_HOST, + virtio_gpu_transfer_to_host_ioctl, + DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF_DRV(VIRTGPU_WAIT, virtio_gpu_wait_ioctl, + DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF_DRV(VIRTGPU_GET_CAPS, virtio_gpu_get_caps_ioctl, + DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF_DRV(VIRTGPU_RESOURCE_CREATE_BLOB, + virtio_gpu_resource_create_blob_ioctl, + DRM_RENDER_ALLOW), + + DRM_IOCTL_DEF_DRV(VIRTGPU_CONTEXT_INIT, virtio_gpu_context_init_ioctl, + DRM_RENDER_ALLOW), +}; diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c new file mode 100644 index 000000000..27b7f14da --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_kms.c @@ -0,0 +1,346 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> + +#include <drm/drm_file.h> +#include <drm/drm_managed.h> + +#include "virtgpu_drv.h" + +static void virtio_gpu_config_changed_work_func(struct work_struct *work) +{ + struct virtio_gpu_device *vgdev = + container_of(work, struct virtio_gpu_device, + config_changed_work); + u32 events_read, events_clear = 0; + + /* read the config space */ + virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, + events_read, &events_read); + if (events_read & VIRTIO_GPU_EVENT_DISPLAY) { + if (vgdev->has_edid) + virtio_gpu_cmd_get_edids(vgdev); + virtio_gpu_cmd_get_display_info(vgdev); + virtio_gpu_notify(vgdev); + drm_helper_hpd_irq_event(vgdev->ddev); + events_clear |= VIRTIO_GPU_EVENT_DISPLAY; + } + virtio_cwrite_le(vgdev->vdev, struct virtio_gpu_config, + events_clear, &events_clear); +} + +static void virtio_gpu_init_vq(struct virtio_gpu_queue *vgvq, + void (*work_func)(struct work_struct *work)) +{ + spin_lock_init(&vgvq->qlock); + init_waitqueue_head(&vgvq->ack_queue); + INIT_WORK(&vgvq->dequeue_work, work_func); +} + +static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev, + int num_capsets) +{ + int i, ret; + bool invalid_capset_id = false; + struct drm_device *drm = vgdev->ddev; + + vgdev->capsets = drmm_kcalloc(drm, num_capsets, + sizeof(struct virtio_gpu_drv_capset), + GFP_KERNEL); + if (!vgdev->capsets) { + DRM_ERROR("failed to allocate cap sets\n"); + return; + } + for (i = 0; i < num_capsets; i++) { + virtio_gpu_cmd_get_capset_info(vgdev, i); + virtio_gpu_notify(vgdev); + ret = wait_event_timeout(vgdev->resp_wq, + vgdev->capsets[i].id > 0, 5 * HZ); + /* + * Capability ids are defined in the virtio-gpu spec and are + * between 1 to 63, inclusive. 
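Putting the context pieces together from a user-space point of view: a driver discovers the supported capset ids (via VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs or VIRTGPU_GET_CAPS) and then calls VIRTGPU_CONTEXT_INIT once, before any other ioctl implicitly creates the default context. A sketch that assumes capset id 1 (the virgl capset in the virtio-gpu spec) is available on the host:

#include <stdint.h>
#include <sys/ioctl.h>
#include <libdrm/virtgpu_drm.h>

int init_virgl_context(int drm_fd)
{
	struct drm_virtgpu_context_set_param params[] = {
		{ .param = VIRTGPU_CONTEXT_PARAM_CAPSET_ID, .value = 1 },	/* assumed: virgl */
		{ .param = VIRTGPU_CONTEXT_PARAM_NUM_RINGS, .value = 1 },
	};
	struct drm_virtgpu_context_init init = {
		.num_params     = 2,
		.ctx_set_params = (uint64_t)(uintptr_t)params,
	};

	/* Fails with EEXIST if another ioctl already created the default context. */
	return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_CONTEXT_INIT, &init);
}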
+ */ + if (!vgdev->capsets[i].id || + vgdev->capsets[i].id > MAX_CAPSET_ID) + invalid_capset_id = true; + + if (ret == 0) + DRM_ERROR("timed out waiting for cap set %d\n", i); + else if (invalid_capset_id) + DRM_ERROR("invalid capset id %u", vgdev->capsets[i].id); + + if (ret == 0 || invalid_capset_id) { + spin_lock(&vgdev->display_info_lock); + drmm_kfree(drm, vgdev->capsets); + vgdev->capsets = NULL; + spin_unlock(&vgdev->display_info_lock); + return; + } + + vgdev->capset_id_mask |= 1 << vgdev->capsets[i].id; + DRM_INFO("cap set %d: id %d, max-version %d, max-size %d\n", + i, vgdev->capsets[i].id, + vgdev->capsets[i].max_version, + vgdev->capsets[i].max_size); + } + + vgdev->num_capsets = num_capsets; +} + +int virtio_gpu_init(struct virtio_device *vdev, struct drm_device *dev) +{ + static vq_callback_t *callbacks[] = { + virtio_gpu_ctrl_ack, virtio_gpu_cursor_ack + }; + static const char * const names[] = { "control", "cursor" }; + + struct virtio_gpu_device *vgdev; + /* this will expand later */ + struct virtqueue *vqs[2]; + u32 num_scanouts, num_capsets; + int ret = 0; + + if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) + return -ENODEV; + + vgdev = drmm_kzalloc(dev, sizeof(struct virtio_gpu_device), GFP_KERNEL); + if (!vgdev) + return -ENOMEM; + + vgdev->ddev = dev; + dev->dev_private = vgdev; + vgdev->vdev = vdev; + + spin_lock_init(&vgdev->display_info_lock); + spin_lock_init(&vgdev->resource_export_lock); + spin_lock_init(&vgdev->host_visible_lock); + ida_init(&vgdev->ctx_id_ida); + ida_init(&vgdev->resource_ida); + init_waitqueue_head(&vgdev->resp_wq); + virtio_gpu_init_vq(&vgdev->ctrlq, virtio_gpu_dequeue_ctrl_func); + virtio_gpu_init_vq(&vgdev->cursorq, virtio_gpu_dequeue_cursor_func); + + vgdev->fence_drv.context = dma_fence_context_alloc(1); + spin_lock_init(&vgdev->fence_drv.lock); + INIT_LIST_HEAD(&vgdev->fence_drv.fences); + INIT_LIST_HEAD(&vgdev->cap_cache); + INIT_WORK(&vgdev->config_changed_work, + virtio_gpu_config_changed_work_func); + + INIT_WORK(&vgdev->obj_free_work, + virtio_gpu_array_put_free_work); + INIT_LIST_HEAD(&vgdev->obj_free_list); + spin_lock_init(&vgdev->obj_free_lock); + +#ifdef __LITTLE_ENDIAN + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_VIRGL)) + vgdev->has_virgl_3d = true; +#endif + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) { + vgdev->has_edid = true; + } + if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) { + vgdev->has_indirect = true; + } + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_UUID)) { + vgdev->has_resource_assign_uuid = true; + } + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_RESOURCE_BLOB)) { + vgdev->has_resource_blob = true; + } + if (virtio_get_shm_region(vgdev->vdev, &vgdev->host_visible_region, + VIRTIO_GPU_SHM_ID_HOST_VISIBLE)) { + if (!devm_request_mem_region(&vgdev->vdev->dev, + vgdev->host_visible_region.addr, + vgdev->host_visible_region.len, + dev_name(&vgdev->vdev->dev))) { + DRM_ERROR("Could not reserve host visible region\n"); + ret = -EBUSY; + goto err_vqs; + } + + DRM_INFO("Host memory window: 0x%lx +0x%lx\n", + (unsigned long)vgdev->host_visible_region.addr, + (unsigned long)vgdev->host_visible_region.len); + vgdev->has_host_visible = true; + drm_mm_init(&vgdev->host_visible_mm, + (unsigned long)vgdev->host_visible_region.addr, + (unsigned long)vgdev->host_visible_region.len); + } + if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_CONTEXT_INIT)) { + vgdev->has_context_init = true; + } + + DRM_INFO("features: %cvirgl %cedid %cresource_blob %chost_visible", + 
vgdev->has_virgl_3d ? '+' : '-', + vgdev->has_edid ? '+' : '-', + vgdev->has_resource_blob ? '+' : '-', + vgdev->has_host_visible ? '+' : '-'); + + DRM_INFO("features: %ccontext_init\n", + vgdev->has_context_init ? '+' : '-'); + + ret = virtio_find_vqs(vgdev->vdev, 2, vqs, callbacks, names, NULL); + if (ret) { + DRM_ERROR("failed to find virt queues\n"); + goto err_vqs; + } + vgdev->ctrlq.vq = vqs[0]; + vgdev->cursorq.vq = vqs[1]; + ret = virtio_gpu_alloc_vbufs(vgdev); + if (ret) { + DRM_ERROR("failed to alloc vbufs\n"); + goto err_vbufs; + } + + /* get display info */ + virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, + num_scanouts, &num_scanouts); + vgdev->num_scanouts = min_t(uint32_t, num_scanouts, + VIRTIO_GPU_MAX_SCANOUTS); + if (!vgdev->num_scanouts) { + DRM_ERROR("num_scanouts is zero\n"); + ret = -EINVAL; + goto err_scanouts; + } + DRM_INFO("number of scanouts: %d\n", num_scanouts); + + virtio_cread_le(vgdev->vdev, struct virtio_gpu_config, + num_capsets, &num_capsets); + DRM_INFO("number of cap sets: %d\n", num_capsets); + + ret = virtio_gpu_modeset_init(vgdev); + if (ret) { + DRM_ERROR("modeset init failed\n"); + goto err_scanouts; + } + + virtio_device_ready(vgdev->vdev); + + if (num_capsets) + virtio_gpu_get_capsets(vgdev, num_capsets); + if (vgdev->has_edid) + virtio_gpu_cmd_get_edids(vgdev); + virtio_gpu_cmd_get_display_info(vgdev); + virtio_gpu_notify(vgdev); + wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending, + 5 * HZ); + return 0; + +err_scanouts: + virtio_gpu_free_vbufs(vgdev); +err_vbufs: + vgdev->vdev->config->del_vqs(vgdev->vdev); +err_vqs: + dev->dev_private = NULL; + return ret; +} + +static void virtio_gpu_cleanup_cap_cache(struct virtio_gpu_device *vgdev) +{ + struct virtio_gpu_drv_cap_cache *cache_ent, *tmp; + + list_for_each_entry_safe(cache_ent, tmp, &vgdev->cap_cache, head) { + kfree(cache_ent->caps_cache); + kfree(cache_ent); + } +} + +void virtio_gpu_deinit(struct drm_device *dev) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + + flush_work(&vgdev->obj_free_work); + flush_work(&vgdev->ctrlq.dequeue_work); + flush_work(&vgdev->cursorq.dequeue_work); + flush_work(&vgdev->config_changed_work); + virtio_reset_device(vgdev->vdev); + vgdev->vdev->config->del_vqs(vgdev->vdev); +} + +void virtio_gpu_release(struct drm_device *dev) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + + if (!vgdev) + return; + + virtio_gpu_modeset_fini(vgdev); + virtio_gpu_free_vbufs(vgdev); + virtio_gpu_cleanup_cap_cache(vgdev); + + if (vgdev->has_host_visible) + drm_mm_takedown(&vgdev->host_visible_mm); +} + +int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_fpriv *vfpriv; + int handle; + + /* can't create contexts without 3d renderer */ + if (!vgdev->has_virgl_3d) + return 0; + + /* allocate a virt GPU context for this opener */ + vfpriv = kzalloc(sizeof(*vfpriv), GFP_KERNEL); + if (!vfpriv) + return -ENOMEM; + + mutex_init(&vfpriv->context_lock); + + handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL); + if (handle < 0) { + kfree(vfpriv); + return handle; + } + + vfpriv->ctx_id = handle + 1; + file->driver_priv = vfpriv; + return 0; +} + +void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file) +{ + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_fpriv *vfpriv = file->driver_priv; + + if (!vgdev->has_virgl_3d) + return; + + if (vfpriv->context_created) { + virtio_gpu_cmd_context_destroy(vgdev, 
vfpriv->ctx_id); + virtio_gpu_notify(vgdev); + } + + ida_free(&vgdev->ctx_id_ida, vfpriv->ctx_id - 1); + mutex_destroy(&vfpriv->context_lock); + kfree(vfpriv); + file->driver_priv = NULL; +} diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c new file mode 100644 index 000000000..c7e74cf13 --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_object.c @@ -0,0 +1,249 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/dma-mapping.h> +#include <linux/moduleparam.h> + +#include "virtgpu_drv.h" + +static int virtio_gpu_virglrenderer_workaround = 1; +module_param_named(virglhack, virtio_gpu_virglrenderer_workaround, int, 0400); + +int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev, uint32_t *resid) +{ + if (virtio_gpu_virglrenderer_workaround) { + /* + * Hack to avoid re-using resource IDs. + * + * virglrenderer versions up to (and including) 0.7.0 + * can't deal with that. virglrenderer commit + * "f91a9dd35715 Fix unlinking resources from hash + * table." (Feb 2019) fixes the bug. 
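Both the per-file ctx_id above and the hw_res_handle allocation below use the same ida convention: the allocator hands out values starting at 0, but id 0 is effectively reserved on the virtio-gpu wire (it means "no resource" in commands such as set_scanout, and the kernel itself submits with ctx 0), so the stored id is always handle + 1. A stripped-down restatement of that pairing, assuming the usual kernel headers (<linux/idr.h>, <linux/types.h>):

static int example_id_get(struct ida *ida, u32 *id)
{
	int handle = ida_alloc(ida, GFP_KERNEL);

	if (handle < 0)
		return handle;
	*id = handle + 1;	/* keep 0 free to mean "none" */
	return 0;
}

static void example_id_put(struct ida *ida, u32 id)
{
	ida_free(ida, id - 1);
}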
+ */ + static atomic_t seqno = ATOMIC_INIT(0); + int handle = atomic_inc_return(&seqno); + *resid = handle + 1; + } else { + int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL); + if (handle < 0) + return handle; + *resid = handle + 1; + } + return 0; +} + +static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t id) +{ + if (!virtio_gpu_virglrenderer_workaround) { + ida_free(&vgdev->resource_ida, id - 1); + } +} + +void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo) +{ + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + + virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); + if (virtio_gpu_is_shmem(bo)) { + drm_gem_shmem_free(&bo->base); + } else if (virtio_gpu_is_vram(bo)) { + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + + spin_lock(&vgdev->host_visible_lock); + if (drm_mm_node_allocated(&vram->vram_node)) + drm_mm_remove_node(&vram->vram_node); + + spin_unlock(&vgdev->host_visible_lock); + + drm_gem_free_mmap_offset(&vram->base.base.base); + drm_gem_object_release(&vram->base.base.base); + kfree(vram); + } +} + +static void virtio_gpu_free_object(struct drm_gem_object *obj) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + + if (bo->created) { + virtio_gpu_cmd_unref_resource(vgdev, bo); + virtio_gpu_notify(vgdev); + /* completion handler calls virtio_gpu_cleanup_object() */ + return; + } + virtio_gpu_cleanup_object(bo); +} + +static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = { + .free = virtio_gpu_free_object, + .open = virtio_gpu_gem_object_open, + .close = virtio_gpu_gem_object_close, + .print_info = drm_gem_shmem_object_print_info, + .export = virtgpu_gem_prime_export, + .pin = drm_gem_shmem_object_pin, + .unpin = drm_gem_shmem_object_unpin, + .get_sg_table = drm_gem_shmem_object_get_sg_table, + .vmap = drm_gem_shmem_object_vmap, + .vunmap = drm_gem_shmem_object_vunmap, + .mmap = drm_gem_shmem_object_mmap, + .vm_ops = &drm_gem_shmem_vm_ops, +}; + +bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo) +{ + return bo->base.base.funcs == &virtio_gpu_shmem_funcs; +} + +struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev, + size_t size) +{ + struct virtio_gpu_object_shmem *shmem; + struct drm_gem_shmem_object *dshmem; + + shmem = kzalloc(sizeof(*shmem), GFP_KERNEL); + if (!shmem) + return ERR_PTR(-ENOMEM); + + dshmem = &shmem->base.base; + dshmem->base.funcs = &virtio_gpu_shmem_funcs; + return &dshmem->base; +} + +static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_mem_entry **ents, + unsigned int *nents) +{ + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); + struct scatterlist *sg; + struct sg_table *pages; + int si; + + pages = drm_gem_shmem_get_pages_sgt(&bo->base); + if (IS_ERR(pages)) + return PTR_ERR(pages); + + if (use_dma_api) + *nents = pages->nents; + else + *nents = pages->orig_nents; + + *ents = kvmalloc_array(*nents, + sizeof(struct virtio_gpu_mem_entry), + GFP_KERNEL); + if (!(*ents)) { + DRM_ERROR("failed to allocate ent list\n"); + return -ENOMEM; + } + + if (use_dma_api) { + for_each_sgtable_dma_sg(pages, sg, si) { + (*ents)[si].addr = cpu_to_le64(sg_dma_address(sg)); + (*ents)[si].length = cpu_to_le32(sg_dma_len(sg)); + (*ents)[si].padding = 0; + } + } else { + for_each_sgtable_sg(pages, sg, si) { + (*ents)[si].addr = cpu_to_le64(sg_phys(sg)); + (*ents)[si].length = cpu_to_le32(sg->length); + 
(*ents)[si].padding = 0; + } + } + + return 0; +} + +int virtio_gpu_object_create(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object **bo_ptr, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_object_array *objs = NULL; + struct drm_gem_shmem_object *shmem_obj; + struct virtio_gpu_object *bo; + struct virtio_gpu_mem_entry *ents = NULL; + unsigned int nents; + int ret; + + *bo_ptr = NULL; + + params->size = roundup(params->size, PAGE_SIZE); + shmem_obj = drm_gem_shmem_create(vgdev->ddev, params->size); + if (IS_ERR(shmem_obj)) + return PTR_ERR(shmem_obj); + bo = gem_to_virtio_gpu_obj(&shmem_obj->base); + + ret = virtio_gpu_resource_id_get(vgdev, &bo->hw_res_handle); + if (ret < 0) + goto err_free_gem; + + bo->dumb = params->dumb; + + ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents); + if (ret != 0) + goto err_put_id; + + if (fence) { + ret = -ENOMEM; + objs = virtio_gpu_array_alloc(1); + if (!objs) + goto err_free_entry; + virtio_gpu_array_add_obj(objs, &bo->base.base); + + ret = virtio_gpu_array_lock_resv(objs); + if (ret != 0) + goto err_put_objs; + } + + if (params->blob) { + if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST) + bo->guest_blob = true; + + virtio_gpu_cmd_resource_create_blob(vgdev, bo, params, + ents, nents); + } else if (params->virgl) { + virtio_gpu_cmd_resource_create_3d(vgdev, bo, params, + objs, fence); + virtio_gpu_object_attach(vgdev, bo, ents, nents); + } else { + virtio_gpu_cmd_create_resource(vgdev, bo, params, + objs, fence); + virtio_gpu_object_attach(vgdev, bo, ents, nents); + } + + *bo_ptr = bo; + return 0; + +err_put_objs: + virtio_gpu_array_put_free(objs); +err_free_entry: + kvfree(ents); +err_put_id: + virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle); +err_free_gem: + drm_gem_shmem_free(shmem_obj); + return ret; +} diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c new file mode 100644 index 000000000..4c09e313b --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_plane.c @@ -0,0 +1,394 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining + * a copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sublicense, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial + * portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. + * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE + * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION + * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION + * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
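The plane code below special-cases dumb BOs (bo->dumb); that is the path an ordinary KMS client takes, allocating scanout storage with the generic dumb-buffer ioctl so that the shmem object created above backs it with guest pages. A user-space sketch of that allocation (generic DRM UAPI, not specific to this driver; the size and bpp are example values):

#include <stdio.h>
#include <sys/ioctl.h>
#include <libdrm/drm.h>
#include <libdrm/drm_mode.h>

int create_dumb_fb_storage(int drm_fd)
{
	struct drm_mode_create_dumb creq = {
		.width  = 1024,
		.height = 768,
		.bpp    = 32,	/* matches the 32-bit DRM_FORMAT_HOST_XRGB8888 used below */
	};

	if (ioctl(drm_fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq) != 0)
		return -1;

	printf("handle %u, pitch %u, size %llu\n",
	       creq.handle, creq.pitch, (unsigned long long)creq.size);
	return 0;
}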
+ */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_damage_helper.h> +#include <drm/drm_fourcc.h> + +#include "virtgpu_drv.h" + +static const uint32_t virtio_gpu_formats[] = { + DRM_FORMAT_HOST_XRGB8888, +}; + +static const uint32_t virtio_gpu_cursor_formats[] = { + DRM_FORMAT_HOST_ARGB8888, +}; + +uint32_t virtio_gpu_translate_format(uint32_t drm_fourcc) +{ + uint32_t format; + + switch (drm_fourcc) { + case DRM_FORMAT_XRGB8888: + format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM; + break; + case DRM_FORMAT_ARGB8888: + format = VIRTIO_GPU_FORMAT_B8G8R8A8_UNORM; + break; + case DRM_FORMAT_BGRX8888: + format = VIRTIO_GPU_FORMAT_X8R8G8B8_UNORM; + break; + case DRM_FORMAT_BGRA8888: + format = VIRTIO_GPU_FORMAT_A8R8G8B8_UNORM; + break; + default: + /* + * This should not happen, we handle everything listed + * in virtio_gpu_formats[]. + */ + format = 0; + break; + } + WARN_ON(format == 0); + return format; +} + +static const struct drm_plane_funcs virtio_gpu_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .reset = drm_atomic_helper_plane_reset, + .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_plane_destroy_state, +}; + +static int virtio_gpu_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + bool is_cursor = plane->type == DRM_PLANE_TYPE_CURSOR; + struct drm_crtc_state *crtc_state; + int ret; + + if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc)) + return 0; + + crtc_state = drm_atomic_get_crtc_state(state, + new_plane_state->crtc); + if (IS_ERR(crtc_state)) + return PTR_ERR(crtc_state); + + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + is_cursor, true); + return ret; +} + +static void virtio_gpu_update_dumb_bo(struct virtio_gpu_device *vgdev, + struct drm_plane_state *state, + struct drm_rect *rect) +{ + struct virtio_gpu_object *bo = + gem_to_virtio_gpu_obj(state->fb->obj[0]); + struct virtio_gpu_object_array *objs; + uint32_t w = rect->x2 - rect->x1; + uint32_t h = rect->y2 - rect->y1; + uint32_t x = rect->x1; + uint32_t y = rect->y1; + uint32_t off = x * state->fb->format->cpp[0] + + y * state->fb->pitches[0]; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, &bo->base.base); + + virtio_gpu_cmd_transfer_to_host_2d(vgdev, off, w, h, x, y, + objs, NULL); +} + +static void virtio_gpu_resource_flush(struct drm_plane *plane, + uint32_t x, uint32_t y, + uint32_t width, uint32_t height) +{ + struct drm_device *dev = plane->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_object *bo; + + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); + if (vgfb->fence) { + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); + virtio_gpu_array_lock_resv(objs); + virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y, + width, height, objs, vgfb->fence); + virtio_gpu_notify(vgdev); + + dma_fence_wait_timeout(&vgfb->fence->f, true, + msecs_to_jiffies(50)); + dma_fence_put(&vgfb->fence->f); + vgfb->fence = NULL; + } else { + virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle, x, y, + width, height, NULL, 
NULL); + virtio_gpu_notify(vgdev); + } +} + +static void virtio_gpu_primary_plane_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_device *dev = plane->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_output *output = NULL; + struct virtio_gpu_object *bo; + struct drm_rect rect; + + if (plane->state->crtc) + output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); + if (old_state->crtc) + output = drm_crtc_to_virtio_gpu_output(old_state->crtc); + if (WARN_ON(!output)) + return; + + if (!plane->state->fb || !output->crtc.state->active) { + DRM_DEBUG("nofb\n"); + virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + 0, 0); + virtio_gpu_notify(vgdev); + return; + } + + if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect)) + return; + + bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]); + if (bo->dumb) + virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect); + + if (plane->state->fb != old_state->fb || + plane->state->src_w != old_state->src_w || + plane->state->src_h != old_state->src_h || + plane->state->src_x != old_state->src_x || + plane->state->src_y != old_state->src_y || + output->needs_modeset) { + output->needs_modeset = false; + DRM_DEBUG("handle 0x%x, crtc %dx%d+%d+%d, src %dx%d+%d+%d\n", + bo->hw_res_handle, + plane->state->crtc_w, plane->state->crtc_h, + plane->state->crtc_x, plane->state->crtc_y, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); + + if (bo->host3d_blob || bo->guest_blob) { + virtio_gpu_cmd_set_scanout_blob + (vgdev, output->index, bo, + plane->state->fb, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); + } else { + virtio_gpu_cmd_set_scanout(vgdev, output->index, + bo->hw_res_handle, + plane->state->src_w >> 16, + plane->state->src_h >> 16, + plane->state->src_x >> 16, + plane->state->src_y >> 16); + } + } + + virtio_gpu_resource_flush(plane, + rect.x1, + rect.y1, + rect.x2 - rect.x1, + rect.y2 - rect.y1); +} + +static int virtio_gpu_plane_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct drm_device *dev = plane->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_object *bo; + + if (!new_state->fb) + return 0; + + vgfb = to_virtio_gpu_framebuffer(new_state->fb); + bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); + if (!bo || (plane->type == DRM_PLANE_TYPE_PRIMARY && !bo->guest_blob)) + return 0; + + if (bo->dumb && (plane->state->fb != new_state->fb)) { + vgfb->fence = virtio_gpu_fence_alloc(vgdev, vgdev->fence_drv.context, + 0); + if (!vgfb->fence) + return -ENOMEM; + } + + return 0; +} + +static void virtio_gpu_plane_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct virtio_gpu_framebuffer *vgfb; + + if (!state->fb) + return; + + vgfb = to_virtio_gpu_framebuffer(state->fb); + if (vgfb->fence) { + dma_fence_put(&vgfb->fence->f); + vgfb->fence = NULL; + } +} + +static void virtio_gpu_cursor_plane_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + struct drm_device *dev = plane->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_output *output = 
NULL; + struct virtio_gpu_framebuffer *vgfb; + struct virtio_gpu_object *bo = NULL; + uint32_t handle; + + if (plane->state->crtc) + output = drm_crtc_to_virtio_gpu_output(plane->state->crtc); + if (old_state->crtc) + output = drm_crtc_to_virtio_gpu_output(old_state->crtc); + if (WARN_ON(!output)) + return; + + if (plane->state->fb) { + vgfb = to_virtio_gpu_framebuffer(plane->state->fb); + bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]); + handle = bo->hw_res_handle; + } else { + handle = 0; + } + + if (bo && bo->dumb && (plane->state->fb != old_state->fb)) { + /* new cursor -- update & wait */ + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return; + virtio_gpu_array_add_obj(objs, vgfb->base.obj[0]); + virtio_gpu_array_lock_resv(objs); + virtio_gpu_cmd_transfer_to_host_2d + (vgdev, 0, + plane->state->crtc_w, + plane->state->crtc_h, + 0, 0, objs, vgfb->fence); + virtio_gpu_notify(vgdev); + dma_fence_wait(&vgfb->fence->f, true); + dma_fence_put(&vgfb->fence->f); + vgfb->fence = NULL; + } + + if (plane->state->fb != old_state->fb) { + DRM_DEBUG("update, handle %d, pos +%d+%d, hot %d,%d\n", handle, + plane->state->crtc_x, + plane->state->crtc_y, + plane->state->fb ? plane->state->fb->hot_x : 0, + plane->state->fb ? plane->state->fb->hot_y : 0); + output->cursor.hdr.type = + cpu_to_le32(VIRTIO_GPU_CMD_UPDATE_CURSOR); + output->cursor.resource_id = cpu_to_le32(handle); + if (plane->state->fb) { + output->cursor.hot_x = + cpu_to_le32(plane->state->fb->hot_x); + output->cursor.hot_y = + cpu_to_le32(plane->state->fb->hot_y); + } else { + output->cursor.hot_x = cpu_to_le32(0); + output->cursor.hot_y = cpu_to_le32(0); + } + } else { + DRM_DEBUG("move +%d+%d\n", + plane->state->crtc_x, + plane->state->crtc_y); + output->cursor.hdr.type = + cpu_to_le32(VIRTIO_GPU_CMD_MOVE_CURSOR); + } + output->cursor.pos.x = cpu_to_le32(plane->state->crtc_x); + output->cursor.pos.y = cpu_to_le32(plane->state->crtc_y); + virtio_gpu_cursor_ping(vgdev, output); +} + +static const struct drm_plane_helper_funcs virtio_gpu_primary_helper_funcs = { + .prepare_fb = virtio_gpu_plane_prepare_fb, + .cleanup_fb = virtio_gpu_plane_cleanup_fb, + .atomic_check = virtio_gpu_plane_atomic_check, + .atomic_update = virtio_gpu_primary_plane_update, +}; + +static const struct drm_plane_helper_funcs virtio_gpu_cursor_helper_funcs = { + .prepare_fb = virtio_gpu_plane_prepare_fb, + .cleanup_fb = virtio_gpu_plane_cleanup_fb, + .atomic_check = virtio_gpu_plane_atomic_check, + .atomic_update = virtio_gpu_cursor_plane_update, +}; + +struct drm_plane *virtio_gpu_plane_init(struct virtio_gpu_device *vgdev, + enum drm_plane_type type, + int index) +{ + struct drm_device *dev = vgdev->ddev; + const struct drm_plane_helper_funcs *funcs; + struct drm_plane *plane; + const uint32_t *formats; + int nformats; + + if (type == DRM_PLANE_TYPE_CURSOR) { + formats = virtio_gpu_cursor_formats; + nformats = ARRAY_SIZE(virtio_gpu_cursor_formats); + funcs = &virtio_gpu_cursor_helper_funcs; + } else { + formats = virtio_gpu_formats; + nformats = ARRAY_SIZE(virtio_gpu_formats); + funcs = &virtio_gpu_primary_helper_funcs; + } + + plane = drmm_universal_plane_alloc(dev, struct drm_plane, dev, + 1 << index, &virtio_gpu_plane_funcs, + formats, nformats, NULL, type, NULL); + if (IS_ERR(plane)) + return plane; + + drm_plane_helper_add(plane, funcs); + return plane; +} diff --git a/drivers/gpu/drm/virtio/virtgpu_prime.c b/drivers/gpu/drm/virtio/virtgpu_prime.c new file mode 100644 index 000000000..44425f20d --- /dev/null +++ 
b/drivers/gpu/drm/virtio/virtgpu_prime.c @@ -0,0 +1,170 @@ +/* + * Copyright 2014 Canonical + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Andreas Pokorny + */ + +#include <drm/drm_prime.h> +#include <linux/virtio_dma_buf.h> + +#include "virtgpu_drv.h" + +static int virtgpu_virtio_get_uuid(struct dma_buf *buf, + uuid_t *uuid) +{ + struct drm_gem_object *obj = buf->priv; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_device *vgdev = obj->dev->dev_private; + + wait_event(vgdev->resp_wq, bo->uuid_state != STATE_INITIALIZING); + if (bo->uuid_state != STATE_OK) + return -ENODEV; + + uuid_copy(uuid, &bo->uuid); + + return 0; +} + +static struct sg_table * +virtgpu_gem_map_dma_buf(struct dma_buf_attachment *attach, + enum dma_data_direction dir) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + + if (virtio_gpu_is_vram(bo)) + return virtio_gpu_vram_map_dma_buf(bo, attach->dev, dir); + + return drm_gem_map_dma_buf(attach, dir); +} + +static void virtgpu_gem_unmap_dma_buf(struct dma_buf_attachment *attach, + struct sg_table *sgt, + enum dma_data_direction dir) +{ + struct drm_gem_object *obj = attach->dmabuf->priv; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + + if (virtio_gpu_is_vram(bo)) { + virtio_gpu_vram_unmap_dma_buf(attach->dev, sgt, dir); + return; + } + + drm_gem_unmap_dma_buf(attach, sgt, dir); +} + +static const struct virtio_dma_buf_ops virtgpu_dmabuf_ops = { + .ops = { + .cache_sgt_mapping = true, + .attach = virtio_dma_buf_attach, + .detach = drm_gem_map_detach, + .map_dma_buf = virtgpu_gem_map_dma_buf, + .unmap_dma_buf = virtgpu_gem_unmap_dma_buf, + .release = drm_gem_dmabuf_release, + .mmap = drm_gem_dmabuf_mmap, + .vmap = drm_gem_dmabuf_vmap, + .vunmap = drm_gem_dmabuf_vunmap, + }, + .device_attach = drm_gem_map_attach, + .get_uuid = virtgpu_virtio_get_uuid, +}; + +int virtio_gpu_resource_assign_uuid(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo) +{ + struct virtio_gpu_object_array *objs; + + objs = virtio_gpu_array_alloc(1); + if (!objs) + return -ENOMEM; + + virtio_gpu_array_add_obj(objs, &bo->base.base); + + return virtio_gpu_cmd_resource_assign_uuid(vgdev, objs); +} + +struct dma_buf *virtgpu_gem_prime_export(struct drm_gem_object *obj, + int flags) +{ + struct dma_buf *buf; + struct drm_device *dev = obj->dev; + struct virtio_gpu_device *vgdev = dev->dev_private; + struct virtio_gpu_object *bo = 
gem_to_virtio_gpu_obj(obj); + int ret = 0; + bool blob = bo->host3d_blob || bo->guest_blob; + DEFINE_DMA_BUF_EXPORT_INFO(exp_info); + + if (!blob) { + if (vgdev->has_resource_assign_uuid) { + ret = virtio_gpu_resource_assign_uuid(vgdev, bo); + if (ret) + return ERR_PTR(ret); + + virtio_gpu_notify(vgdev); + } else { + bo->uuid_state = STATE_ERR; + } + } else if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_CROSS_DEVICE)) { + bo->uuid_state = STATE_ERR; + } + + exp_info.ops = &virtgpu_dmabuf_ops.ops; + exp_info.size = obj->size; + exp_info.flags = flags; + exp_info.priv = obj; + exp_info.resv = obj->resv; + + buf = virtio_dma_buf_export(&exp_info); + if (IS_ERR(buf)) + return buf; + + drm_dev_get(dev); + drm_gem_object_get(obj); + + return buf; +} + +struct drm_gem_object *virtgpu_gem_prime_import(struct drm_device *dev, + struct dma_buf *buf) +{ + struct drm_gem_object *obj; + + if (buf->ops == &virtgpu_dmabuf_ops.ops) { + obj = buf->priv; + if (obj->dev == dev) { + /* + * Importing dmabuf exported from our own gem increases + * refcount on gem itself instead of f_count of dmabuf. + */ + drm_gem_object_get(obj); + return obj; + } + } + + return drm_gem_prime_import(dev, buf); +} + +struct drm_gem_object *virtgpu_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table) +{ + return ERR_PTR(-ENODEV); +} diff --git a/drivers/gpu/drm/virtio/virtgpu_trace.h b/drivers/gpu/drm/virtio/virtgpu_trace.h new file mode 100644 index 000000000..711ecc2bd --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_trace.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#if !defined(_VIRTGPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _VIRTGPU_TRACE_H_ + +#include <linux/tracepoint.h> + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM virtio_gpu +#define TRACE_INCLUDE_FILE virtgpu_trace + +DECLARE_EVENT_CLASS(virtio_gpu_cmd, + TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr), + TP_ARGS(vq, hdr), + TP_STRUCT__entry( + __field(int, dev) + __field(unsigned int, vq) + __field(const char *, name) + __field(u32, type) + __field(u32, flags) + __field(u64, fence_id) + __field(u32, ctx_id) + ), + TP_fast_assign( + __entry->dev = vq->vdev->index; + __entry->vq = vq->index; + __entry->name = vq->name; + __entry->type = le32_to_cpu(hdr->type); + __entry->flags = le32_to_cpu(hdr->flags); + __entry->fence_id = le64_to_cpu(hdr->fence_id); + __entry->ctx_id = le32_to_cpu(hdr->ctx_id); + ), + TP_printk("vdev=%d vq=%u name=%s type=0x%x flags=0x%x fence_id=%llu ctx_id=%u", + __entry->dev, __entry->vq, __entry->name, + __entry->type, __entry->flags, __entry->fence_id, + __entry->ctx_id) +); + +DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_queue, + TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr), + TP_ARGS(vq, hdr) +); + +DEFINE_EVENT(virtio_gpu_cmd, virtio_gpu_cmd_response, + TP_PROTO(struct virtqueue *vq, struct virtio_gpu_ctrl_hdr *hdr), + TP_ARGS(vq, hdr) +); + +#endif + +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH ../../drivers/gpu/drm/virtio +#include <trace/define_trace.h> diff --git a/drivers/gpu/drm/virtio/virtgpu_trace_points.c b/drivers/gpu/drm/virtio/virtgpu_trace_points.c new file mode 100644 index 000000000..1970cb6f2 --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_trace_points.c @@ -0,0 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "virtgpu_drv.h" + +#define CREATE_TRACE_POINTS +#include "virtgpu_trace.h" diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c new file mode 
100644 index 000000000..208e9434c --- /dev/null +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -0,0 +1,1298 @@ +/* + * Copyright (C) 2015 Red Hat, Inc. + * All Rights Reserved. + * + * Authors: + * Dave Airlie <airlied@redhat.com> + * Gerd Hoffmann <kraxel@redhat.com> + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice (including the next + * paragraph) shall be included in all copies or substantial portions of the + * Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include <linux/dma-mapping.h> +#include <linux/virtio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ring.h> + +#include <drm/drm_edid.h> + +#include "virtgpu_drv.h" +#include "virtgpu_trace.h" + +#define MAX_INLINE_CMD_SIZE 96 +#define MAX_INLINE_RESP_SIZE 24 +#define VBUFFER_SIZE (sizeof(struct virtio_gpu_vbuffer) \ + + MAX_INLINE_CMD_SIZE \ + + MAX_INLINE_RESP_SIZE) + +static void convert_to_hw_box(struct virtio_gpu_box *dst, + const struct drm_virtgpu_3d_box *src) +{ + dst->x = cpu_to_le32(src->x); + dst->y = cpu_to_le32(src->y); + dst->z = cpu_to_le32(src->z); + dst->w = cpu_to_le32(src->w); + dst->h = cpu_to_le32(src->h); + dst->d = cpu_to_le32(src->d); +} + +void virtio_gpu_ctrl_ack(struct virtqueue *vq) +{ + struct drm_device *dev = vq->vdev->priv; + struct virtio_gpu_device *vgdev = dev->dev_private; + + schedule_work(&vgdev->ctrlq.dequeue_work); +} + +void virtio_gpu_cursor_ack(struct virtqueue *vq) +{ + struct drm_device *dev = vq->vdev->priv; + struct virtio_gpu_device *vgdev = dev->dev_private; + + schedule_work(&vgdev->cursorq.dequeue_work); +} + +int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev) +{ + vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs", + VBUFFER_SIZE, + __alignof__(struct virtio_gpu_vbuffer), + 0, NULL); + if (!vgdev->vbufs) + return -ENOMEM; + return 0; +} + +void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev) +{ + kmem_cache_destroy(vgdev->vbufs); + vgdev->vbufs = NULL; +} + +static struct virtio_gpu_vbuffer* +virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev, + int size, int resp_size, void *resp_buf, + virtio_gpu_resp_cb resp_cb) +{ + struct virtio_gpu_vbuffer *vbuf; + + vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL | __GFP_NOFAIL); + + BUG_ON(size > MAX_INLINE_CMD_SIZE || + size < sizeof(struct virtio_gpu_ctrl_hdr)); + vbuf->buf = (void *)vbuf + sizeof(*vbuf); + vbuf->size = size; + + vbuf->resp_cb = resp_cb; + vbuf->resp_size = resp_size; + if (resp_size <= MAX_INLINE_RESP_SIZE) + vbuf->resp_buf = (void *)vbuf->buf + size; + else + vbuf->resp_buf = resp_buf; + BUG_ON(!vbuf->resp_buf); + return vbuf; +} + +static struct 
virtio_gpu_ctrl_hdr * +virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf) +{ + /* this assumes a vbuf contains a command that starts with a + * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor + * virtqueues. + */ + return (struct virtio_gpu_ctrl_hdr *)vbuf->buf; +} + +static struct virtio_gpu_update_cursor* +virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer **vbuffer_p) +{ + struct virtio_gpu_vbuffer *vbuf; + + vbuf = virtio_gpu_get_vbuf + (vgdev, sizeof(struct virtio_gpu_update_cursor), + 0, NULL, NULL); + if (IS_ERR(vbuf)) { + *vbuffer_p = NULL; + return ERR_CAST(vbuf); + } + *vbuffer_p = vbuf; + return (struct virtio_gpu_update_cursor *)vbuf->buf; +} + +static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev, + virtio_gpu_resp_cb cb, + struct virtio_gpu_vbuffer **vbuffer_p, + int cmd_size, int resp_size, + void *resp_buf) +{ + struct virtio_gpu_vbuffer *vbuf; + + vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size, + resp_size, resp_buf, cb); + *vbuffer_p = vbuf; + return (struct virtio_gpu_command *)vbuf->buf; +} + +static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer **vbuffer_p, + int size) +{ + return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size, + sizeof(struct virtio_gpu_ctrl_hdr), + NULL); +} + +static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer **vbuffer_p, + int size, + virtio_gpu_resp_cb cb) +{ + return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size, + sizeof(struct virtio_gpu_ctrl_hdr), + NULL); +} + +static void free_vbuf(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + if (vbuf->resp_size > MAX_INLINE_RESP_SIZE) + kfree(vbuf->resp_buf); + kvfree(vbuf->data_buf); + kmem_cache_free(vgdev->vbufs, vbuf); +} + +static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list) +{ + struct virtio_gpu_vbuffer *vbuf; + unsigned int len; + int freed = 0; + + while ((vbuf = virtqueue_get_buf(vq, &len))) { + list_add_tail(&vbuf->list, reclaim_list); + freed++; + } + if (freed == 0) + DRM_DEBUG("Huh? 
zero vbufs reclaimed"); +} + +void virtio_gpu_dequeue_ctrl_func(struct work_struct *work) +{ + struct virtio_gpu_device *vgdev = + container_of(work, struct virtio_gpu_device, + ctrlq.dequeue_work); + struct list_head reclaim_list; + struct virtio_gpu_vbuffer *entry, *tmp; + struct virtio_gpu_ctrl_hdr *resp; + u64 fence_id; + + INIT_LIST_HEAD(&reclaim_list); + spin_lock(&vgdev->ctrlq.qlock); + do { + virtqueue_disable_cb(vgdev->ctrlq.vq); + reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list); + + } while (!virtqueue_enable_cb(vgdev->ctrlq.vq)); + spin_unlock(&vgdev->ctrlq.qlock); + + list_for_each_entry(entry, &reclaim_list, list) { + resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf; + + trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp); + + if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) { + if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) { + struct virtio_gpu_ctrl_hdr *cmd; + cmd = virtio_gpu_vbuf_ctrl_hdr(entry); + DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n", + le32_to_cpu(resp->type), + le32_to_cpu(cmd->type)); + } else + DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type)); + } + if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) { + fence_id = le64_to_cpu(resp->fence_id); + virtio_gpu_fence_event_process(vgdev, fence_id); + } + if (entry->resp_cb) + entry->resp_cb(vgdev, entry); + } + wake_up(&vgdev->ctrlq.ack_queue); + + list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + if (entry->objs) + virtio_gpu_array_put_free_delayed(vgdev, entry->objs); + list_del(&entry->list); + free_vbuf(vgdev, entry); + } +} + +void virtio_gpu_dequeue_cursor_func(struct work_struct *work) +{ + struct virtio_gpu_device *vgdev = + container_of(work, struct virtio_gpu_device, + cursorq.dequeue_work); + struct list_head reclaim_list; + struct virtio_gpu_vbuffer *entry, *tmp; + + INIT_LIST_HEAD(&reclaim_list); + spin_lock(&vgdev->cursorq.qlock); + do { + virtqueue_disable_cb(vgdev->cursorq.vq); + reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list); + } while (!virtqueue_enable_cb(vgdev->cursorq.vq)); + spin_unlock(&vgdev->cursorq.qlock); + + list_for_each_entry_safe(entry, tmp, &reclaim_list, list) { + list_del(&entry->list); + free_vbuf(vgdev, entry); + } + wake_up(&vgdev->cursorq.ack_queue); +} + +/* Create sg_table from a vmalloc'd buffer. 
*/ +static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents) +{ + int ret, s, i; + struct sg_table *sgt; + struct scatterlist *sg; + struct page *pg; + + if (WARN_ON(!PAGE_ALIGNED(data))) + return NULL; + + sgt = kmalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return NULL; + + *sg_ents = DIV_ROUND_UP(size, PAGE_SIZE); + ret = sg_alloc_table(sgt, *sg_ents, GFP_KERNEL); + if (ret) { + kfree(sgt); + return NULL; + } + + for_each_sgtable_sg(sgt, sg, i) { + pg = vmalloc_to_page(data); + if (!pg) { + sg_free_table(sgt); + kfree(sgt); + return NULL; + } + + s = min_t(int, PAGE_SIZE, size); + sg_set_page(sg, pg, s, 0); + + size -= s; + data += s; + } + + return sgt; +} + +static int virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_fence *fence, + int elemcnt, + struct scatterlist **sgs, + int outcnt, + int incnt) +{ + struct virtqueue *vq = vgdev->ctrlq.vq; + int ret, idx; + + if (!drm_dev_enter(vgdev->ddev, &idx)) { + if (fence && vbuf->objs) + virtio_gpu_array_unlock_resv(vbuf->objs); + free_vbuf(vgdev, vbuf); + return -ENODEV; + } + + if (vgdev->has_indirect) + elemcnt = 1; + +again: + spin_lock(&vgdev->ctrlq.qlock); + + if (vq->num_free < elemcnt) { + spin_unlock(&vgdev->ctrlq.qlock); + virtio_gpu_notify(vgdev); + wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt); + goto again; + } + + /* now that the position of the vbuf in the virtqueue is known, we can + * finally set the fence id + */ + if (fence) { + virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf), + fence); + if (vbuf->objs) { + virtio_gpu_array_add_fence(vbuf->objs, &fence->f); + virtio_gpu_array_unlock_resv(vbuf->objs); + } + } + + ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC); + WARN_ON(ret); + + trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf)); + + atomic_inc(&vgdev->pending_commands); + + spin_unlock(&vgdev->ctrlq.qlock); + + drm_dev_exit(idx); + return 0; +} + +static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf, + struct virtio_gpu_fence *fence) +{ + struct scatterlist *sgs[3], vcmd, vout, vresp; + struct sg_table *sgt = NULL; + int elemcnt = 0, outcnt = 0, incnt = 0, ret; + + /* set up vcmd */ + sg_init_one(&vcmd, vbuf->buf, vbuf->size); + elemcnt++; + sgs[outcnt] = &vcmd; + outcnt++; + + /* set up vout */ + if (vbuf->data_size) { + if (is_vmalloc_addr(vbuf->data_buf)) { + int sg_ents; + sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size, + &sg_ents); + if (!sgt) { + if (fence && vbuf->objs) + virtio_gpu_array_unlock_resv(vbuf->objs); + return -ENOMEM; + } + + elemcnt += sg_ents; + sgs[outcnt] = sgt->sgl; + } else { + sg_init_one(&vout, vbuf->data_buf, vbuf->data_size); + elemcnt++; + sgs[outcnt] = &vout; + } + outcnt++; + } + + /* set up vresp */ + if (vbuf->resp_size) { + sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size); + elemcnt++; + sgs[outcnt + incnt] = &vresp; + incnt++; + } + + ret = virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt, + incnt); + + if (sgt) { + sg_free_table(sgt); + kfree(sgt); + } + return ret; +} + +void virtio_gpu_notify(struct virtio_gpu_device *vgdev) +{ + bool notify; + + if (!atomic_read(&vgdev->pending_commands)) + return; + + spin_lock(&vgdev->ctrlq.qlock); + atomic_set(&vgdev->pending_commands, 0); + notify = virtqueue_kick_prepare(vgdev->ctrlq.vq); + spin_unlock(&vgdev->ctrlq.qlock); + + if (notify) + virtqueue_notify(vgdev->ctrlq.vq); +} + +static int 
virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + return virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL); +} + +static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtqueue *vq = vgdev->cursorq.vq; + struct scatterlist *sgs[1], ccmd; + int idx, ret, outcnt; + bool notify; + + if (!drm_dev_enter(vgdev->ddev, &idx)) { + free_vbuf(vgdev, vbuf); + return; + } + + sg_init_one(&ccmd, vbuf->buf, vbuf->size); + sgs[0] = &ccmd; + outcnt = 1; + + spin_lock(&vgdev->cursorq.qlock); +retry: + ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC); + if (ret == -ENOSPC) { + spin_unlock(&vgdev->cursorq.qlock); + wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt); + spin_lock(&vgdev->cursorq.qlock); + goto retry; + } else { + trace_virtio_gpu_cmd_queue(vq, + virtio_gpu_vbuf_ctrl_hdr(vbuf)); + + notify = virtqueue_kick_prepare(vq); + } + + spin_unlock(&vgdev->cursorq.qlock); + + if (notify) + virtqueue_notify(vq); + + drm_dev_exit(idx); +} + +/* just create gem objects for userspace and long lived objects, + * just use dma_alloced pages for the queue objects? + */ + +/* create a basic resource */ +void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_resource_create_2d *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->format = cpu_to_le32(params->format); + cmd_p->width = cpu_to_le32(params->width); + cmd_p->height = cpu_to_le32(params->height); + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); + bo->created = true; +} + +static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_object *bo; + + bo = vbuf->resp_cb_data; + vbuf->resp_cb_data = NULL; + + virtio_gpu_cleanup_object(bo); +} + +void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo) +{ + struct virtio_gpu_resource_unref *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + int ret; + + cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p), + virtio_gpu_cmd_unref_cb); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + + vbuf->resp_cb_data = bo; + ret = virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + if (ret < 0) + virtio_gpu_cleanup_object(bo); +} + +void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev, + uint32_t scanout_id, uint32_t resource_id, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y) +{ + struct virtio_gpu_set_scanout *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT); + cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->scanout_id = cpu_to_le32(scanout_id); + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} + +void 
virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev, + uint32_t resource_id, + uint32_t x, uint32_t y, + uint32_t width, uint32_t height, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_resource_flush *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH); + cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); +} + +void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, + uint64_t offset, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_transfer_to_host_2d *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); + + if (virtio_gpu_is_shmem(bo) && use_dma_api) + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + bo->base.sgt, DMA_TO_DEVICE); + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->offset = cpu_to_le64(offset); + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); +} + +static void +virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev, + uint32_t resource_id, + struct virtio_gpu_mem_entry *ents, + uint32_t nents, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_resource_attach_backing *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING); + cmd_p->resource_id = cpu_to_le32(resource_id); + cmd_p->nr_entries = cpu_to_le32(nents); + + vbuf->data_buf = ents; + vbuf->data_size = sizeof(*ents) * nents; + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); +} + +static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_resp_display_info *resp = + (struct virtio_gpu_resp_display_info *)vbuf->resp_buf; + int i; + + spin_lock(&vgdev->display_info_lock); + for (i = 0; i < vgdev->num_scanouts; i++) { + vgdev->outputs[i].info = resp->pmodes[i]; + if (resp->pmodes[i].enabled) { + DRM_DEBUG("output %d: %dx%d+%d+%d", i, + le32_to_cpu(resp->pmodes[i].r.width), + le32_to_cpu(resp->pmodes[i].r.height), + le32_to_cpu(resp->pmodes[i].r.x), + le32_to_cpu(resp->pmodes[i].r.y)); + } else { + DRM_DEBUG("output %d: disabled", i); + } + } + + vgdev->display_info_pending = false; + spin_unlock(&vgdev->display_info_lock); + wake_up(&vgdev->resp_wq); + + if (!drm_helper_hpd_irq_event(vgdev->ddev)) + drm_kms_helper_hotplug_event(vgdev->ddev); +} + +static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_get_capset_info *cmd = + (struct 
virtio_gpu_get_capset_info *)vbuf->buf; + struct virtio_gpu_resp_capset_info *resp = + (struct virtio_gpu_resp_capset_info *)vbuf->resp_buf; + int i = le32_to_cpu(cmd->capset_index); + + spin_lock(&vgdev->display_info_lock); + if (vgdev->capsets) { + vgdev->capsets[i].id = le32_to_cpu(resp->capset_id); + vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version); + vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size); + } else { + DRM_ERROR("invalid capset memory."); + } + spin_unlock(&vgdev->display_info_lock); + wake_up(&vgdev->resp_wq); +} + +static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_get_capset *cmd = + (struct virtio_gpu_get_capset *)vbuf->buf; + struct virtio_gpu_resp_capset *resp = + (struct virtio_gpu_resp_capset *)vbuf->resp_buf; + struct virtio_gpu_drv_cap_cache *cache_ent; + + spin_lock(&vgdev->display_info_lock); + list_for_each_entry(cache_ent, &vgdev->cap_cache, head) { + if (cache_ent->version == le32_to_cpu(cmd->capset_version) && + cache_ent->id == le32_to_cpu(cmd->capset_id)) { + memcpy(cache_ent->caps_cache, resp->capset_data, + cache_ent->size); + /* Copy must occur before is_valid is signalled. */ + smp_wmb(); + atomic_set(&cache_ent->is_valid, 1); + break; + } + } + spin_unlock(&vgdev->display_info_lock); + wake_up_all(&vgdev->resp_wq); +} + +static int virtio_get_edid_block(void *data, u8 *buf, + unsigned int block, size_t len) +{ + struct virtio_gpu_resp_edid *resp = data; + size_t start = block * EDID_LENGTH; + + if (start + len > le32_to_cpu(resp->size)) + return -EINVAL; + memcpy(buf, resp->edid + start, len); + return 0; +} + +static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_cmd_get_edid *cmd = + (struct virtio_gpu_cmd_get_edid *)vbuf->buf; + struct virtio_gpu_resp_edid *resp = + (struct virtio_gpu_resp_edid *)vbuf->resp_buf; + uint32_t scanout = le32_to_cpu(cmd->scanout); + struct virtio_gpu_output *output; + struct edid *new_edid, *old_edid; + + if (scanout >= vgdev->num_scanouts) + return; + output = vgdev->outputs + scanout; + + new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp); + drm_connector_update_edid_property(&output->conn, new_edid); + + spin_lock(&vgdev->display_info_lock); + old_edid = output->edid; + output->edid = new_edid; + spin_unlock(&vgdev->display_info_lock); + + kfree(old_edid); + wake_up(&vgdev->resp_wq); +} + +int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev) +{ + struct virtio_gpu_ctrl_hdr *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + void *resp_buf; + + resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info), + GFP_KERNEL); + if (!resp_buf) + return -ENOMEM; + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf, + sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info), + resp_buf); + memset(cmd_p, 0, sizeof(*cmd_p)); + + vgdev->display_info_pending = true; + cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO); + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + return 0; +} + +int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx) +{ + struct virtio_gpu_get_capset_info *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + void *resp_buf; + + resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info), + GFP_KERNEL); + if (!resp_buf) + return -ENOMEM; + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf, + 
sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info), + resp_buf); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO); + cmd_p->capset_index = cpu_to_le32(idx); + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + return 0; +} + +int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev, + int idx, int version, + struct virtio_gpu_drv_cap_cache **cache_p) +{ + struct virtio_gpu_get_capset *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + int max_size; + struct virtio_gpu_drv_cap_cache *cache_ent; + struct virtio_gpu_drv_cap_cache *search_ent; + void *resp_buf; + + *cache_p = NULL; + + if (idx >= vgdev->num_capsets) + return -EINVAL; + + if (version > vgdev->capsets[idx].max_version) + return -EINVAL; + + cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL); + if (!cache_ent) + return -ENOMEM; + + max_size = vgdev->capsets[idx].max_size; + cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL); + if (!cache_ent->caps_cache) { + kfree(cache_ent); + return -ENOMEM; + } + + resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size, + GFP_KERNEL); + if (!resp_buf) { + kfree(cache_ent->caps_cache); + kfree(cache_ent); + return -ENOMEM; + } + + cache_ent->version = version; + cache_ent->id = vgdev->capsets[idx].id; + atomic_set(&cache_ent->is_valid, 0); + cache_ent->size = max_size; + spin_lock(&vgdev->display_info_lock); + /* Search while under lock in case it was added by another task. */ + list_for_each_entry(search_ent, &vgdev->cap_cache, head) { + if (search_ent->id == vgdev->capsets[idx].id && + search_ent->version == version) { + *cache_p = search_ent; + break; + } + } + if (!*cache_p) + list_add_tail(&cache_ent->head, &vgdev->cap_cache); + spin_unlock(&vgdev->display_info_lock); + + if (*cache_p) { + /* Entry was found, so free everything that was just created. 
*/ + kfree(resp_buf); + kfree(cache_ent->caps_cache); + kfree(cache_ent); + return 0; + } + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p), + sizeof(struct virtio_gpu_resp_capset) + max_size, + resp_buf); + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET); + cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id); + cmd_p->capset_version = cpu_to_le32(version); + *cache_p = cache_ent; + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + + return 0; +} + +int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev) +{ + struct virtio_gpu_cmd_get_edid *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + void *resp_buf; + int scanout; + + if (WARN_ON(!vgdev->has_edid)) + return -EINVAL; + + for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) { + resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid), + GFP_KERNEL); + if (!resp_buf) + return -ENOMEM; + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf, + sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid), + resp_buf); + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID); + cmd_p->scanout = cpu_to_le32(scanout); + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + } + + return 0; +} + +void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id, + uint32_t context_init, uint32_t nlen, + const char *name) +{ + struct virtio_gpu_ctx_create *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE); + cmd_p->hdr.ctx_id = cpu_to_le32(id); + cmd_p->nlen = cpu_to_le32(nlen); + cmd_p->context_init = cpu_to_le32(context_init); + strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1); + cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0; + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} + +void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev, + uint32_t id) +{ + struct virtio_gpu_ctx_destroy *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY); + cmd_p->hdr.ctx_id = cpu_to_le32(id); + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} + +void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev, + uint32_t ctx_id, + struct virtio_gpu_object_array *objs) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_ctx_resource *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE); + cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} + +void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev, + uint32_t ctx_id, + struct virtio_gpu_object_array *objs) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_ctx_resource *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE); + cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + 
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} + +void +virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_resource_create_3d *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->format = cpu_to_le32(params->format); + cmd_p->width = cpu_to_le32(params->width); + cmd_p->height = cpu_to_le32(params->height); + + cmd_p->target = cpu_to_le32(params->target); + cmd_p->bind = cpu_to_le32(params->bind); + cmd_p->depth = cpu_to_le32(params->depth); + cmd_p->array_size = cpu_to_le32(params->array_size); + cmd_p->last_level = cpu_to_le32(params->last_level); + cmd_p->nr_samples = cpu_to_le32(params->nr_samples); + cmd_p->flags = cpu_to_le32(params->flags); + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); + + bo->created = true; +} + +void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, + uint32_t ctx_id, + uint64_t offset, uint32_t level, + uint32_t stride, + uint32_t layer_stride, + struct drm_virtgpu_3d_box *box, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_transfer_host_3d *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); + + if (virtio_gpu_is_shmem(bo) && use_dma_api) + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, + bo->base.sgt, DMA_TO_DEVICE); + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D); + cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + convert_to_hw_box(&cmd_p->box, box); + cmd_p->offset = cpu_to_le64(offset); + cmd_p->level = cpu_to_le32(level); + cmd_p->stride = cpu_to_le32(stride); + cmd_p->layer_stride = cpu_to_le32(layer_stride); + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); +} + +void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev, + uint32_t ctx_id, + uint64_t offset, uint32_t level, + uint32_t stride, + uint32_t layer_stride, + struct drm_virtgpu_3d_box *box, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence *fence) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_transfer_host_3d *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D); + cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + convert_to_hw_box(&cmd_p->box, box); + cmd_p->offset = cpu_to_le64(offset); + cmd_p->level = cpu_to_le32(level); + cmd_p->stride = cpu_to_le32(stride); + cmd_p->layer_stride = cpu_to_le32(layer_stride); + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); +} + +void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev, + void *data, uint32_t data_size, + uint32_t ctx_id, + struct virtio_gpu_object_array *objs, + struct virtio_gpu_fence 
*fence) +{ + struct virtio_gpu_cmd_submit *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + vbuf->data_buf = data; + vbuf->data_size = data_size; + vbuf->objs = objs; + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D); + cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id); + cmd_p->size = cpu_to_le32(data_size); + + virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence); +} + +void virtio_gpu_object_attach(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *obj, + struct virtio_gpu_mem_entry *ents, + unsigned int nents) +{ + virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle, + ents, nents, NULL); +} + +void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev, + struct virtio_gpu_output *output) +{ + struct virtio_gpu_vbuffer *vbuf; + struct virtio_gpu_update_cursor *cur_p; + + output->cursor.pos.scanout_id = cpu_to_le32(output->index); + cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf); + memcpy(cur_p, &output->cursor, sizeof(output->cursor)); + virtio_gpu_queue_cursor(vgdev, vbuf); +} + +static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_object *obj = + gem_to_virtio_gpu_obj(vbuf->objs->objs[0]); + struct virtio_gpu_resp_resource_uuid *resp = + (struct virtio_gpu_resp_resource_uuid *)vbuf->resp_buf; + uint32_t resp_type = le32_to_cpu(resp->hdr.type); + + spin_lock(&vgdev->resource_export_lock); + WARN_ON(obj->uuid_state != STATE_INITIALIZING); + + if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID && + obj->uuid_state == STATE_INITIALIZING) { + import_uuid(&obj->uuid, resp->uuid); + obj->uuid_state = STATE_OK; + } else { + obj->uuid_state = STATE_ERR; + } + spin_unlock(&vgdev->resource_export_lock); + + wake_up_all(&vgdev->resp_wq); +} + +int +virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_resource_assign_uuid *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + struct virtio_gpu_resp_resource_uuid *resp_buf; + + resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL); + if (!resp_buf) { + spin_lock(&vgdev->resource_export_lock); + bo->uuid_state = STATE_ERR; + spin_unlock(&vgdev->resource_export_lock); + virtio_gpu_array_put_free(objs); + return -ENOMEM; + } + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, virtio_gpu_cmd_resource_uuid_cb, &vbuf, sizeof(*cmd_p), + sizeof(struct virtio_gpu_resp_resource_uuid), resp_buf); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ASSIGN_UUID); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + + vbuf->objs = objs; + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + return 0; +} + +static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev, + struct virtio_gpu_vbuffer *vbuf) +{ + struct virtio_gpu_object *bo = + gem_to_virtio_gpu_obj(vbuf->objs->objs[0]); + struct virtio_gpu_resp_map_info *resp = + (struct virtio_gpu_resp_map_info *)vbuf->resp_buf; + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + uint32_t resp_type = le32_to_cpu(resp->hdr.type); + + spin_lock(&vgdev->host_visible_lock); + + if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) { + vram->map_info = resp->map_info; + vram->map_state = STATE_OK; + } else { + vram->map_state = STATE_ERR; + } + + spin_unlock(&vgdev->host_visible_lock); + wake_up_all(&vgdev->resp_wq); +} + +int 
virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_array *objs, uint64_t offset) +{ + struct virtio_gpu_resource_map_blob *cmd_p; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]); + struct virtio_gpu_vbuffer *vbuf; + struct virtio_gpu_resp_map_info *resp_buf; + + resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL); + if (!resp_buf) + return -ENOMEM; + + cmd_p = virtio_gpu_alloc_cmd_resp + (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p), + sizeof(struct virtio_gpu_resp_map_info), resp_buf); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->offset = cpu_to_le64(offset); + vbuf->objs = objs; + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + return 0; +} + +void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo) +{ + struct virtio_gpu_resource_unmap_blob *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} + +void +virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object *bo, + struct virtio_gpu_object_params *params, + struct virtio_gpu_mem_entry *ents, + uint32_t nents) +{ + struct virtio_gpu_resource_create_blob *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB); + cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->blob_mem = cpu_to_le32(params->blob_mem); + cmd_p->blob_flags = cpu_to_le32(params->blob_flags); + cmd_p->blob_id = cpu_to_le64(params->blob_id); + cmd_p->size = cpu_to_le64(params->size); + cmd_p->nr_entries = cpu_to_le32(nents); + + vbuf->data_buf = ents; + vbuf->data_size = sizeof(*ents) * nents; + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); + bo->created = true; +} + +void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev, + uint32_t scanout_id, + struct virtio_gpu_object *bo, + struct drm_framebuffer *fb, + uint32_t width, uint32_t height, + uint32_t x, uint32_t y) +{ + uint32_t i; + struct virtio_gpu_set_scanout_blob *cmd_p; + struct virtio_gpu_vbuffer *vbuf; + uint32_t format = virtio_gpu_translate_format(fb->format->format); + + cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); + memset(cmd_p, 0, sizeof(*cmd_p)); + + cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB); + cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle); + cmd_p->scanout_id = cpu_to_le32(scanout_id); + + cmd_p->format = cpu_to_le32(format); + cmd_p->width = cpu_to_le32(fb->width); + cmd_p->height = cpu_to_le32(fb->height); + + for (i = 0; i < 4; i++) { + cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]); + cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]); + } + + cmd_p->r.width = cpu_to_le32(width); + cmd_p->r.height = cpu_to_le32(height); + cmd_p->r.x = cpu_to_le32(x); + cmd_p->r.y = cpu_to_le32(y); + + virtio_gpu_queue_ctrl_buffer(vgdev, vbuf); +} diff --git a/drivers/gpu/drm/virtio/virtgpu_vram.c b/drivers/gpu/drm/virtio/virtgpu_vram.c new file mode 100644 index 000000000..6b45b0429 --- /dev/null +++ 
b/drivers/gpu/drm/virtio/virtgpu_vram.c @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: GPL-2.0 +#include "virtgpu_drv.h" + +#include <linux/dma-mapping.h> + +static void virtio_gpu_vram_free(struct drm_gem_object *obj) +{ + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_device *vgdev = obj->dev->dev_private; + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + bool unmap; + + if (bo->created) { + spin_lock(&vgdev->host_visible_lock); + unmap = drm_mm_node_allocated(&vram->vram_node); + spin_unlock(&vgdev->host_visible_lock); + + if (unmap) + virtio_gpu_cmd_unmap(vgdev, bo); + + virtio_gpu_cmd_unref_resource(vgdev, bo); + virtio_gpu_notify(vgdev); + return; + } +} + +static const struct vm_operations_struct virtio_gpu_vram_vm_ops = { + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +}; + +static int virtio_gpu_vram_mmap(struct drm_gem_object *obj, + struct vm_area_struct *vma) +{ + int ret; + struct virtio_gpu_device *vgdev = obj->dev->dev_private; + struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj); + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + unsigned long vm_size = vma->vm_end - vma->vm_start; + + if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) + return -EINVAL; + + wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING); + if (vram->map_state != STATE_OK) + return -EINVAL; + + vma->vm_pgoff -= drm_vma_node_start(&obj->vma_node); + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot); + vma->vm_ops = &virtio_gpu_vram_vm_ops; + + if (vram->map_info == VIRTIO_GPU_MAP_CACHE_WC) + vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot); + else if (vram->map_info == VIRTIO_GPU_MAP_CACHE_UNCACHED) + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + + /* Partial mappings of GEM buffers don't happen much in practice. */ + if (vm_size != vram->vram_node.size) + return -EINVAL; + + ret = io_remap_pfn_range(vma, vma->vm_start, + vram->vram_node.start >> PAGE_SHIFT, + vm_size, vma->vm_page_prot); + return ret; +} + +struct sg_table *virtio_gpu_vram_map_dma_buf(struct virtio_gpu_object *bo, + struct device *dev, + enum dma_data_direction dir) +{ + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + struct sg_table *sgt; + dma_addr_t addr; + int ret; + + sgt = kzalloc(sizeof(*sgt), GFP_KERNEL); + if (!sgt) + return ERR_PTR(-ENOMEM); + + if (!(bo->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE)) { + // Virtio devices can access the dma-buf via its UUID. Return a stub + // sg_table so the dma-buf API still works. 
+ if (!is_virtio_device(dev) || !vgdev->has_resource_assign_uuid) { + ret = -EIO; + goto out; + } + return sgt; + } + + ret = sg_alloc_table(sgt, 1, GFP_KERNEL); + if (ret) + goto out; + + addr = dma_map_resource(dev, vram->vram_node.start, + vram->vram_node.size, dir, + DMA_ATTR_SKIP_CPU_SYNC); + ret = dma_mapping_error(dev, addr); + if (ret) + goto out; + + sg_set_page(sgt->sgl, NULL, vram->vram_node.size, 0); + sg_dma_address(sgt->sgl) = addr; + sg_dma_len(sgt->sgl) = vram->vram_node.size; + + return sgt; +out: + sg_free_table(sgt); + kfree(sgt); + return ERR_PTR(ret); +} + +void virtio_gpu_vram_unmap_dma_buf(struct device *dev, + struct sg_table *sgt, + enum dma_data_direction dir) +{ + if (sgt->nents) { + dma_unmap_resource(dev, sg_dma_address(sgt->sgl), + sg_dma_len(sgt->sgl), dir, + DMA_ATTR_SKIP_CPU_SYNC); + } + sg_free_table(sgt); + kfree(sgt); +} + +static const struct drm_gem_object_funcs virtio_gpu_vram_funcs = { + .open = virtio_gpu_gem_object_open, + .close = virtio_gpu_gem_object_close, + .free = virtio_gpu_vram_free, + .mmap = virtio_gpu_vram_mmap, + .export = virtgpu_gem_prime_export, +}; + +bool virtio_gpu_is_vram(struct virtio_gpu_object *bo) +{ + return bo->base.base.funcs == &virtio_gpu_vram_funcs; +} + +static int virtio_gpu_vram_map(struct virtio_gpu_object *bo) +{ + int ret; + uint64_t offset; + struct virtio_gpu_object_array *objs; + struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private; + struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo); + + if (!vgdev->has_host_visible) + return -EINVAL; + + spin_lock(&vgdev->host_visible_lock); + ret = drm_mm_insert_node(&vgdev->host_visible_mm, &vram->vram_node, + bo->base.base.size); + spin_unlock(&vgdev->host_visible_lock); + + if (ret) + return ret; + + objs = virtio_gpu_array_alloc(1); + if (!objs) { + ret = -ENOMEM; + goto err_remove_node; + } + + virtio_gpu_array_add_obj(objs, &bo->base.base); + /*TODO: Add an error checking helper function in drm_mm.h */ + offset = vram->vram_node.start - vgdev->host_visible_region.addr; + + ret = virtio_gpu_cmd_map(vgdev, objs, offset); + if (ret) { + virtio_gpu_array_put_free(objs); + goto err_remove_node; + } + + return 0; + +err_remove_node: + spin_lock(&vgdev->host_visible_lock); + drm_mm_remove_node(&vram->vram_node); + spin_unlock(&vgdev->host_visible_lock); + return ret; +} + +int virtio_gpu_vram_create(struct virtio_gpu_device *vgdev, + struct virtio_gpu_object_params *params, + struct virtio_gpu_object **bo_ptr) +{ + struct drm_gem_object *obj; + struct virtio_gpu_object_vram *vram; + int ret; + + vram = kzalloc(sizeof(*vram), GFP_KERNEL); + if (!vram) + return -ENOMEM; + + obj = &vram->base.base.base; + obj->funcs = &virtio_gpu_vram_funcs; + + params->size = PAGE_ALIGN(params->size); + drm_gem_private_object_init(vgdev->ddev, obj, params->size); + + /* Create fake offset */ + ret = drm_gem_create_mmap_offset(obj); + if (ret) { + kfree(vram); + return ret; + } + + ret = virtio_gpu_resource_id_get(vgdev, &vram->base.hw_res_handle); + if (ret) { + kfree(vram); + return ret; + } + + virtio_gpu_cmd_resource_create_blob(vgdev, &vram->base, params, NULL, + 0); + if (params->blob_flags & VIRTGPU_BLOB_FLAG_USE_MAPPABLE) { + ret = virtio_gpu_vram_map(&vram->base); + if (ret) { + virtio_gpu_vram_free(obj); + return ret; + } + } + + *bo_ptr = &vram->base; + return 0; +} |
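
The control-queue code in virtgpu_vq.c carves both the command and any small response out of one vbuffer allocation (VBUFFER_SIZE = the bookkeeping struct + MAX_INLINE_CMD_SIZE + MAX_INLINE_RESP_SIZE), falling back to an externally supplied response buffer only when the response is larger than the inline slot. Below is a minimal user-space model of that layout; the struct and function names are simplified stand-ins for illustration, not the driver's own types.

	#include <stdio.h>
	#include <stdlib.h>

	#define MAX_INLINE_CMD_SIZE  96
	#define MAX_INLINE_RESP_SIZE 24

	struct vbuf_model {
		void *buf;        /* command, always inline             */
		int   size;       /* command size                       */
		void *resp_buf;   /* response, inline when small enough */
		int   resp_size;
	};

	#define VBUF_MODEL_SIZE (sizeof(struct vbuf_model) + \
				 MAX_INLINE_CMD_SIZE + MAX_INLINE_RESP_SIZE)

	static struct vbuf_model *vbuf_model_get(int cmd_size, int resp_size,
						 void *external_resp)
	{
		struct vbuf_model *v = calloc(1, VBUF_MODEL_SIZE);

		if (!v)
			return NULL;
		/* the command area starts directly behind the bookkeeping struct */
		v->buf = (char *)v + sizeof(*v);
		v->size = cmd_size;
		v->resp_size = resp_size;
		/* small responses reuse the tail of the same allocation */
		v->resp_buf = (resp_size <= MAX_INLINE_RESP_SIZE)
			      ? (char *)v->buf + cmd_size : external_resp;
		return v;
	}

	int main(void)
	{
		struct vbuf_model *v = vbuf_model_get(64, 24, NULL);

		if (!v)
			return 1;
		printf("allocation: %zu bytes, cmd at +%td, resp at +%td\n",
		       VBUF_MODEL_SIZE,
		       (char *)v->buf - (char *)v,
		       (char *)v->resp_buf - (char *)v);
		free(v);
		return 0;
	}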
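
virtio_gpu_queue_ctrl_sgs() only records that work is outstanding (atomic_inc(&vgdev->pending_commands)); the doorbell itself is deferred to virtio_gpu_notify(), so several commands queued back to back cost a single virtqueue kick. A rough user-space model of that batching follows, with C11 atomics and printf standing in for the driver's atomic_t and virtqueue kick; the command names are just labels taken from the commands issued above.

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int pending_commands;

	/* stand-in for queueing one command on the control virtqueue */
	static void model_queue_cmd(const char *name)
	{
		printf("queued %s\n", name);
		atomic_fetch_add(&pending_commands, 1);
	}

	/* stand-in for virtio_gpu_notify(): one doorbell for the whole batch */
	static void model_notify(void)
	{
		if (!atomic_load(&pending_commands))
			return;
		atomic_store(&pending_commands, 0);
		printf("kick: one notification for the whole batch\n");
	}

	int main(void)
	{
		model_queue_cmd("RESOURCE_CREATE_2D");
		model_queue_cmd("TRANSFER_TO_HOST_2D");
		model_queue_cmd("SET_SCANOUT");
		model_notify();
		return 0;
	}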
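
virtio_gpu_cmd_get_capset() builds a new cache entry outside the lock, then searches the cap_cache list under display_info_lock and throws the fresh entry away if another task already published one for the same id/version. The sketch below models that allocate-then-search-and-insert pattern only; a pthread mutex and a hand-rolled singly linked list are assumptions standing in for the driver's spinlock and list_head.

	#include <pthread.h>
	#include <stdint.h>
	#include <stdlib.h>

	struct cap_ent {
		uint32_t id, version;
		struct cap_ent *next;
	};

	static struct cap_ent *cap_cache;
	static pthread_mutex_t cap_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct cap_ent *cap_cache_get(uint32_t id, uint32_t version)
	{
		struct cap_ent *fresh = calloc(1, sizeof(*fresh));
		struct cap_ent *it, *found = NULL;

		if (!fresh)
			return NULL;
		fresh->id = id;
		fresh->version = version;

		pthread_mutex_lock(&cap_lock);
		for (it = cap_cache; it; it = it->next)
			if (it->id == id && it->version == version)
				found = it;
		if (!found) {		/* we won the race: publish our entry */
			fresh->next = cap_cache;
			cap_cache = fresh;
		}
		pthread_mutex_unlock(&cap_lock);

		if (found) {		/* someone beat us to it: discard ours */
			free(fresh);
			return found;
		}
		return fresh;		/* caller would now request the capset */
	}

	int main(void)
	{
		struct cap_ent *a = cap_cache_get(1, 1);
		struct cap_ent *b = cap_cache_get(1, 1);	/* reuses a */

		return (a && a == b) ? 0 : 1;
	}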