author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/gpu/drm/xen
parent     Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/xen')
-rw-r--r--  drivers/gpu/drm/xen/Kconfig                  |  15
-rw-r--r--  drivers/gpu/drm/xen/Makefile                 |  10
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.c          | 795
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front.h          | 156
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_cfg.c      |  77
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_cfg.h      |  37
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_conn.c     | 117
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_conn.h     |  25
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_evtchnl.c  | 366
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_evtchnl.h  |  81
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_gem.c      | 304
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_gem.h      |  40
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_kms.c      | 381
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_kms.h      |  26
14 files changed, 2430 insertions, 0 deletions
diff --git a/drivers/gpu/drm/xen/Kconfig b/drivers/gpu/drm/xen/Kconfig
new file mode 100644
index 0000000000..7eb0ce3455
--- /dev/null
+++ b/drivers/gpu/drm/xen/Kconfig
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config DRM_XEN
+ bool
+
+config DRM_XEN_FRONTEND
+ tristate "Para-virtualized frontend driver for Xen guest OS"
+ depends on XEN && DRM
+ select DRM_XEN
+ select DRM_KMS_HELPER
+ select VIDEOMODE_HELPERS
+ select XEN_XENBUS_FRONTEND
+ select XEN_FRONT_PGDIR_SHBUF
+ help
+ Choose this option if you want to enable a para-virtualized
+ frontend DRM/KMS driver for Xen guest OSes.
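
Not part of the patch: an illustrative .config fragment, assuming the frontend is built as a module. DRM_XEN, DRM_KMS_HELPER and the other selected symbols listed in the Kconfig entry above are pulled in automatically; only the last line needs to be set by hand.

CONFIG_XEN=y
CONFIG_DRM=y
CONFIG_DRM_XEN_FRONTEND=m
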
diff --git a/drivers/gpu/drm/xen/Makefile b/drivers/gpu/drm/xen/Makefile
new file mode 100644
index 0000000000..825905f67f
--- /dev/null
+++ b/drivers/gpu/drm/xen/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0 OR MIT
+
+drm_xen_front-objs := xen_drm_front.o \
+ xen_drm_front_kms.o \
+ xen_drm_front_conn.o \
+ xen_drm_front_evtchnl.o \
+ xen_drm_front_cfg.o \
+ xen_drm_front_gem.o
+
+obj-$(CONFIG_DRM_XEN_FRONTEND) += drm_xen_front.o
diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c
new file mode 100644
index 0000000000..aab79c5e34
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front.c
@@ -0,0 +1,795 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_file.h>
+#include <drm/drm_gem.h>
+
+#include <xen/platform_pci.h>
+#include <xen/xen.h>
+#include <xen/xenbus.h>
+
+#include <xen/xen-front-pgdir-shbuf.h>
+#include <xen/interface/io/displif.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_cfg.h"
+#include "xen_drm_front_evtchnl.h"
+#include "xen_drm_front_gem.h"
+#include "xen_drm_front_kms.h"
+
+struct xen_drm_front_dbuf {
+ struct list_head list;
+ u64 dbuf_cookie;
+ u64 fb_cookie;
+
+ struct xen_front_pgdir_shbuf shbuf;
+};
+
+static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
+ struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
+{
+ dbuf->dbuf_cookie = dbuf_cookie;
+ list_add(&dbuf->list, &front_info->dbuf_list);
+}
+
+static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
+ u64 dbuf_cookie)
+{
+ struct xen_drm_front_dbuf *buf, *q;
+
+ list_for_each_entry_safe(buf, q, dbuf_list, list)
+ if (buf->dbuf_cookie == dbuf_cookie)
+ return buf;
+
+ return NULL;
+}
+
+static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
+{
+ struct xen_drm_front_dbuf *buf, *q;
+
+ list_for_each_entry_safe(buf, q, dbuf_list, list)
+ if (buf->dbuf_cookie == dbuf_cookie) {
+ list_del(&buf->list);
+ xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+ xen_front_pgdir_shbuf_free(&buf->shbuf);
+ kfree(buf);
+ break;
+ }
+}
+
+static void dbuf_free_all(struct list_head *dbuf_list)
+{
+ struct xen_drm_front_dbuf *buf, *q;
+
+ list_for_each_entry_safe(buf, q, dbuf_list, list) {
+ list_del(&buf->list);
+ xen_front_pgdir_shbuf_unmap(&buf->shbuf);
+ xen_front_pgdir_shbuf_free(&buf->shbuf);
+ kfree(buf);
+ }
+}
+
+static struct xendispl_req *
+be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
+{
+ struct xendispl_req *req;
+
+ req = RING_GET_REQUEST(&evtchnl->u.req.ring,
+ evtchnl->u.req.ring.req_prod_pvt);
+ req->operation = operation;
+ req->id = evtchnl->evt_next_id++;
+ evtchnl->evt_id = req->id;
+ return req;
+}
+
+static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
+ struct xendispl_req *req)
+{
+ reinit_completion(&evtchnl->u.req.completion);
+ if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
+ return -EIO;
+
+ xen_drm_front_evtchnl_flush(evtchnl);
+ return 0;
+}
+
+static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
+{
+ if (wait_for_completion_timeout(&evtchnl->u.req.completion,
+ msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
+ return -ETIMEDOUT;
+
+ return evtchnl->u.req.resp_status;
+}
+
+int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
+ u32 x, u32 y, u32 width, u32 height,
+ u32 bpp, u64 fb_cookie)
+{
+ struct xen_drm_front_evtchnl *evtchnl;
+ struct xen_drm_front_info *front_info;
+ struct xendispl_req *req;
+ unsigned long flags;
+ int ret;
+
+ front_info = pipeline->drm_info->front_info;
+ evtchnl = &front_info->evt_pairs[pipeline->index].req;
+ if (unlikely(!evtchnl))
+ return -EIO;
+
+ mutex_lock(&evtchnl->u.req.req_io_lock);
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+ req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
+ req->op.set_config.x = x;
+ req->op.set_config.y = y;
+ req->op.set_config.width = width;
+ req->op.set_config.height = height;
+ req->op.set_config.bpp = bpp;
+ req->op.set_config.fb_cookie = fb_cookie;
+
+ ret = be_stream_do_io(evtchnl, req);
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+ if (ret == 0)
+ ret = be_stream_wait_io(evtchnl);
+
+ mutex_unlock(&evtchnl->u.req.req_io_lock);
+ return ret;
+}
+
+int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
+ u64 dbuf_cookie, u32 width, u32 height,
+ u32 bpp, u64 size, u32 offset,
+ struct page **pages)
+{
+ struct xen_drm_front_evtchnl *evtchnl;
+ struct xen_drm_front_dbuf *dbuf;
+ struct xendispl_req *req;
+ struct xen_front_pgdir_shbuf_cfg buf_cfg;
+ unsigned long flags;
+ int ret;
+
+ evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+ if (unlikely(!evtchnl))
+ return -EIO;
+
+ dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
+ if (!dbuf)
+ return -ENOMEM;
+
+ dbuf_add_to_list(front_info, dbuf, dbuf_cookie);
+
+ memset(&buf_cfg, 0, sizeof(buf_cfg));
+ buf_cfg.xb_dev = front_info->xb_dev;
+ buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ buf_cfg.pages = pages;
+ buf_cfg.pgdir = &dbuf->shbuf;
+ buf_cfg.be_alloc = front_info->cfg.be_alloc;
+
+ ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
+ if (ret < 0)
+ goto fail_shbuf_alloc;
+
+ mutex_lock(&evtchnl->u.req.req_io_lock);
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+ req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
+ req->op.dbuf_create.gref_directory =
+ xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
+ req->op.dbuf_create.buffer_sz = size;
+ req->op.dbuf_create.data_ofs = offset;
+ req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
+ req->op.dbuf_create.width = width;
+ req->op.dbuf_create.height = height;
+ req->op.dbuf_create.bpp = bpp;
+ if (buf_cfg.be_alloc)
+ req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;
+
+ ret = be_stream_do_io(evtchnl, req);
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+ if (ret < 0)
+ goto fail;
+
+ ret = be_stream_wait_io(evtchnl);
+ if (ret < 0)
+ goto fail;
+
+ ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
+ if (ret < 0)
+ goto fail;
+
+ mutex_unlock(&evtchnl->u.req.req_io_lock);
+ return 0;
+
+fail:
+ mutex_unlock(&evtchnl->u.req.req_io_lock);
+fail_shbuf_alloc:
+ dbuf_free(&front_info->dbuf_list, dbuf_cookie);
+ return ret;
+}
+
+static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
+ u64 dbuf_cookie)
+{
+ struct xen_drm_front_evtchnl *evtchnl;
+ struct xendispl_req *req;
+ unsigned long flags;
+ bool be_alloc;
+ int ret;
+
+ evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+ if (unlikely(!evtchnl))
+ return -EIO;
+
+ be_alloc = front_info->cfg.be_alloc;
+
+ /*
+	 * For a backend-allocated buffer, release the references now so the
+	 * backend can free the buffer.
+ */
+ if (be_alloc)
+ dbuf_free(&front_info->dbuf_list, dbuf_cookie);
+
+ mutex_lock(&evtchnl->u.req.req_io_lock);
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+ req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
+ req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;
+
+ ret = be_stream_do_io(evtchnl, req);
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+ if (ret == 0)
+ ret = be_stream_wait_io(evtchnl);
+
+ /*
+ * Do this regardless of communication status with the backend:
+	 * if we cannot remove the remote resources, remove what we can locally.
+ */
+ if (!be_alloc)
+ dbuf_free(&front_info->dbuf_list, dbuf_cookie);
+
+ mutex_unlock(&evtchnl->u.req.req_io_lock);
+ return ret;
+}
+
+int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
+ u64 dbuf_cookie, u64 fb_cookie, u32 width,
+ u32 height, u32 pixel_format)
+{
+ struct xen_drm_front_evtchnl *evtchnl;
+ struct xen_drm_front_dbuf *buf;
+ struct xendispl_req *req;
+ unsigned long flags;
+ int ret;
+
+ evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+ if (unlikely(!evtchnl))
+ return -EIO;
+
+ buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
+ if (!buf)
+ return -EINVAL;
+
+ buf->fb_cookie = fb_cookie;
+
+ mutex_lock(&evtchnl->u.req.req_io_lock);
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+ req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
+ req->op.fb_attach.dbuf_cookie = dbuf_cookie;
+ req->op.fb_attach.fb_cookie = fb_cookie;
+ req->op.fb_attach.width = width;
+ req->op.fb_attach.height = height;
+ req->op.fb_attach.pixel_format = pixel_format;
+
+ ret = be_stream_do_io(evtchnl, req);
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+ if (ret == 0)
+ ret = be_stream_wait_io(evtchnl);
+
+ mutex_unlock(&evtchnl->u.req.req_io_lock);
+ return ret;
+}
+
+int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
+ u64 fb_cookie)
+{
+ struct xen_drm_front_evtchnl *evtchnl;
+ struct xendispl_req *req;
+ unsigned long flags;
+ int ret;
+
+ evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
+ if (unlikely(!evtchnl))
+ return -EIO;
+
+ mutex_lock(&evtchnl->u.req.req_io_lock);
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+ req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
+ req->op.fb_detach.fb_cookie = fb_cookie;
+
+ ret = be_stream_do_io(evtchnl, req);
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+ if (ret == 0)
+ ret = be_stream_wait_io(evtchnl);
+
+ mutex_unlock(&evtchnl->u.req.req_io_lock);
+ return ret;
+}
+
+int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
+ int conn_idx, u64 fb_cookie)
+{
+ struct xen_drm_front_evtchnl *evtchnl;
+ struct xendispl_req *req;
+ unsigned long flags;
+ int ret;
+
+ if (unlikely(conn_idx >= front_info->num_evt_pairs))
+ return -EINVAL;
+
+ evtchnl = &front_info->evt_pairs[conn_idx].req;
+
+ mutex_lock(&evtchnl->u.req.req_io_lock);
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+ req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
+ req->op.pg_flip.fb_cookie = fb_cookie;
+
+ ret = be_stream_do_io(evtchnl, req);
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+
+ if (ret == 0)
+ ret = be_stream_wait_io(evtchnl);
+
+ mutex_unlock(&evtchnl->u.req.req_io_lock);
+ return ret;
+}
+
+void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
+ int conn_idx, u64 fb_cookie)
+{
+ struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
+
+ if (unlikely(conn_idx >= front_info->cfg.num_connectors))
+ return;
+
+ xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
+ fb_cookie);
+}
+
+void xen_drm_front_gem_object_free(struct drm_gem_object *obj)
+{
+ struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
+ int idx;
+
+ if (drm_dev_enter(obj->dev, &idx)) {
+ xen_drm_front_dbuf_destroy(drm_info->front_info,
+ xen_drm_front_dbuf_to_cookie(obj));
+ drm_dev_exit(idx);
+ } else {
+ dbuf_free(&drm_info->front_info->dbuf_list,
+ xen_drm_front_dbuf_to_cookie(obj));
+ }
+
+ xen_drm_front_gem_free_object_unlocked(obj);
+}
+
+static int xen_drm_drv_dumb_create(struct drm_file *filp,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+ struct drm_gem_object *obj;
+ int ret;
+
+ /*
+	 * Dumb creation is a two-stage process: first we create a fully
+	 * constructed GEM object which is communicated to the backend, and
+	 * only after that we can create the GEM handle. This ordering avoids
+	 * a race: once a handle is created it becomes immediately visible to
+	 * user-space, which could then try to access an object that has no
+	 * pages yet.
+	 * For details also see drm_gem_handle_create().
+ */
+ args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
+ args->size = args->pitch * args->height;
+
+ obj = xen_drm_front_gem_create(dev, args->size);
+ if (IS_ERR(obj)) {
+ ret = PTR_ERR(obj);
+ goto fail;
+ }
+
+ ret = xen_drm_front_dbuf_create(drm_info->front_info,
+ xen_drm_front_dbuf_to_cookie(obj),
+ args->width, args->height, args->bpp,
+ args->size, 0,
+ xen_drm_front_gem_get_pages(obj));
+ if (ret)
+ goto fail_backend;
+
+ /* This is the tail of GEM object creation */
+ ret = drm_gem_handle_create(filp, obj, &args->handle);
+ if (ret)
+ goto fail_handle;
+
+ /* Drop reference from allocate - handle holds it now */
+ drm_gem_object_put(obj);
+ return 0;
+
+fail_handle:
+ xen_drm_front_dbuf_destroy(drm_info->front_info,
+ xen_drm_front_dbuf_to_cookie(obj));
+fail_backend:
+ /* drop reference from allocate */
+ drm_gem_object_put(obj);
+fail:
+ DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
+ return ret;
+}
+
+static void xen_drm_drv_release(struct drm_device *dev)
+{
+ struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+ struct xen_drm_front_info *front_info = drm_info->front_info;
+
+ xen_drm_front_kms_fini(drm_info);
+
+ drm_atomic_helper_shutdown(dev);
+ drm_mode_config_cleanup(dev);
+
+ if (front_info->cfg.be_alloc)
+ xenbus_switch_state(front_info->xb_dev,
+ XenbusStateInitialising);
+
+ kfree(drm_info);
+}
+
+DEFINE_DRM_GEM_FOPS(xen_drm_dev_fops);
+
+static const struct drm_driver xen_drm_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .release = xen_drm_drv_release,
+ .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
+ .dumb_create = xen_drm_drv_dumb_create,
+ .fops = &xen_drm_dev_fops,
+ .name = "xendrm-du",
+ .desc = "Xen PV DRM Display Unit",
+ .date = "20180221",
+ .major = 1,
+ .minor = 0,
+
+};
+
+static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
+{
+ struct device *dev = &front_info->xb_dev->dev;
+ struct xen_drm_front_drm_info *drm_info;
+ struct drm_device *drm_dev;
+ int ret;
+
+ if (drm_firmware_drivers_only())
+ return -ENODEV;
+
+ DRM_INFO("Creating %s\n", xen_drm_driver.desc);
+
+ drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
+ if (!drm_info) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ drm_info->front_info = front_info;
+ front_info->drm_info = drm_info;
+
+ drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
+ if (IS_ERR(drm_dev)) {
+ ret = PTR_ERR(drm_dev);
+ goto fail_dev;
+ }
+
+ drm_info->drm_dev = drm_dev;
+
+ drm_dev->dev_private = drm_info;
+
+ ret = xen_drm_front_kms_init(drm_info);
+ if (ret) {
+ DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
+ goto fail_modeset;
+ }
+
+ ret = drm_dev_register(drm_dev, 0);
+ if (ret)
+ goto fail_register;
+
+ DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
+ xen_drm_driver.name, xen_drm_driver.major,
+ xen_drm_driver.minor, xen_drm_driver.patchlevel,
+ xen_drm_driver.date, drm_dev->primary->index);
+
+ return 0;
+
+fail_register:
+ drm_dev_unregister(drm_dev);
+fail_modeset:
+ drm_kms_helper_poll_fini(drm_dev);
+ drm_mode_config_cleanup(drm_dev);
+ drm_dev_put(drm_dev);
+fail_dev:
+ kfree(drm_info);
+ front_info->drm_info = NULL;
+fail:
+ return ret;
+}
+
+static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
+{
+ struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
+ struct drm_device *dev;
+
+ if (!drm_info)
+ return;
+
+ dev = drm_info->drm_dev;
+ if (!dev)
+ return;
+
+ /* Nothing to do if device is already unplugged */
+ if (drm_dev_is_unplugged(dev))
+ return;
+
+ drm_kms_helper_poll_fini(dev);
+ drm_dev_unplug(dev);
+ drm_dev_put(dev);
+
+ front_info->drm_info = NULL;
+
+ xen_drm_front_evtchnl_free_all(front_info);
+ dbuf_free_all(&front_info->dbuf_list);
+
+ /*
+ * If we are not using backend allocated buffers, then tell the
+ * backend we are ready to (re)initialize. Otherwise, wait for
+ * drm_driver.release.
+ */
+ if (!front_info->cfg.be_alloc)
+ xenbus_switch_state(front_info->xb_dev,
+ XenbusStateInitialising);
+}
+
+static int displback_initwait(struct xen_drm_front_info *front_info)
+{
+ struct xen_drm_front_cfg *cfg = &front_info->cfg;
+ int ret;
+
+ cfg->front_info = front_info;
+ ret = xen_drm_front_cfg_card(front_info, cfg);
+ if (ret < 0)
+ return ret;
+
+ DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
+ /* Create event channels for all connectors and publish */
+ ret = xen_drm_front_evtchnl_create_all(front_info);
+ if (ret < 0)
+ return ret;
+
+ return xen_drm_front_evtchnl_publish_all(front_info);
+}
+
+static int displback_connect(struct xen_drm_front_info *front_info)
+{
+ xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
+ return xen_drm_drv_init(front_info);
+}
+
+static void displback_disconnect(struct xen_drm_front_info *front_info)
+{
+ if (!front_info->drm_info)
+ return;
+
+ /* Tell the backend to wait until we release the DRM driver. */
+ xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);
+
+ xen_drm_drv_fini(front_info);
+}
+
+static void displback_changed(struct xenbus_device *xb_dev,
+ enum xenbus_state backend_state)
+{
+ struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
+ int ret;
+
+ DRM_DEBUG("Backend state is %s, front is %s\n",
+ xenbus_strstate(backend_state),
+ xenbus_strstate(xb_dev->state));
+
+ switch (backend_state) {
+ case XenbusStateReconfiguring:
+ case XenbusStateReconfigured:
+ case XenbusStateInitialised:
+ break;
+
+ case XenbusStateInitialising:
+ if (xb_dev->state == XenbusStateReconfiguring)
+ break;
+
+		/* recovering after an unexpected backend closure */
+ displback_disconnect(front_info);
+ break;
+
+ case XenbusStateInitWait:
+ if (xb_dev->state == XenbusStateReconfiguring)
+ break;
+
+		/* recovering after an unexpected backend closure */
+ displback_disconnect(front_info);
+ if (xb_dev->state != XenbusStateInitialising)
+ break;
+
+ ret = displback_initwait(front_info);
+ if (ret < 0)
+ xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
+ else
+ xenbus_switch_state(xb_dev, XenbusStateInitialised);
+ break;
+
+ case XenbusStateConnected:
+ if (xb_dev->state != XenbusStateInitialised)
+ break;
+
+ ret = displback_connect(front_info);
+ if (ret < 0) {
+ displback_disconnect(front_info);
+ xenbus_dev_fatal(xb_dev, ret, "connecting backend");
+ } else {
+ xenbus_switch_state(xb_dev, XenbusStateConnected);
+ }
+ break;
+
+ case XenbusStateClosing:
+ /*
+		 * In this state the backend starts freeing its resources,
+		 * so let it go into the closed state so that we can also
+		 * remove ours.
+ */
+ break;
+
+ case XenbusStateUnknown:
+ case XenbusStateClosed:
+ if (xb_dev->state == XenbusStateClosed)
+ break;
+
+ displback_disconnect(front_info);
+ break;
+ }
+}
+
+static int xen_drv_probe(struct xenbus_device *xb_dev,
+ const struct xenbus_device_id *id)
+{
+ struct xen_drm_front_info *front_info;
+ struct device *dev = &xb_dev->dev;
+ int ret;
+
+ ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
+ if (ret < 0) {
+ DRM_ERROR("Cannot setup DMA mask, ret %d", ret);
+ return ret;
+ }
+
+ front_info = devm_kzalloc(&xb_dev->dev,
+ sizeof(*front_info), GFP_KERNEL);
+ if (!front_info)
+ return -ENOMEM;
+
+ front_info->xb_dev = xb_dev;
+ spin_lock_init(&front_info->io_lock);
+ INIT_LIST_HEAD(&front_info->dbuf_list);
+ dev_set_drvdata(&xb_dev->dev, front_info);
+
+ return xenbus_switch_state(xb_dev, XenbusStateInitialising);
+}
+
+static void xen_drv_remove(struct xenbus_device *dev)
+{
+ struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
+ int to = 100;
+
+ xenbus_switch_state(dev, XenbusStateClosing);
+
+ /*
+	 * On driver removal the device is disconnected from XenBus, so no
+	 * backend state change events come via the .otherend_changed
+	 * callback. This prevents us from exiting gracefully, e.g.
+	 * signaling the backend to free event channels, waiting for its
+	 * state to change to XenbusStateClosed and cleaning up at our end.
+	 * Normally, when the frontend driver is removed, the backend
+	 * eventually goes into the XenbusStateInitWait state.
+	 *
+	 * Workaround: read the backend's state manually and wait with a
+	 * time-out.
+ */
+ while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
+ XenbusStateUnknown) != XenbusStateInitWait) &&
+ --to)
+ msleep(10);
+
+ if (!to) {
+ unsigned int state;
+
+ state = xenbus_read_unsigned(front_info->xb_dev->otherend,
+ "state", XenbusStateUnknown);
+ DRM_ERROR("Backend state is %s while removing driver\n",
+ xenbus_strstate(state));
+ }
+
+ xen_drm_drv_fini(front_info);
+ xenbus_frontend_closed(dev);
+}
+
+static const struct xenbus_device_id xen_driver_ids[] = {
+ { XENDISPL_DRIVER_NAME },
+ { "" }
+};
+
+static struct xenbus_driver xen_driver = {
+ .ids = xen_driver_ids,
+ .probe = xen_drv_probe,
+ .remove = xen_drv_remove,
+ .otherend_changed = displback_changed,
+ .not_essential = true,
+};
+
+static int __init xen_drv_init(void)
+{
+	/* At the moment we only support the case where XEN_PAGE_SIZE == PAGE_SIZE */
+ if (XEN_PAGE_SIZE != PAGE_SIZE) {
+ DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
+ XEN_PAGE_SIZE, PAGE_SIZE);
+ return -ENODEV;
+ }
+
+ if (!xen_domain())
+ return -ENODEV;
+
+ if (!xen_has_pv_devices())
+ return -ENODEV;
+
+ DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
+ return xenbus_register_frontend(&xen_driver);
+}
+
+static void __exit xen_drv_fini(void)
+{
+ DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
+ xenbus_unregister_driver(&xen_driver);
+}
+
+module_init(xen_drv_init);
+module_exit(xen_drv_fini);
+
+MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);
diff --git a/drivers/gpu/drm/xen/xen_drm_front.h b/drivers/gpu/drm/xen/xen_drm_front.h
new file mode 100644
index 0000000000..a987c78abe
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_H_
+#define __XEN_DRM_FRONT_H_
+
+#include <linux/scatterlist.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#include "xen_drm_front_cfg.h"
+
+struct drm_device;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_pending_vblank_event;
+
+/**
+ * DOC: Driver modes of operation in terms of display buffers used
+ *
+ * Depending on the requirements for the para-virtualized environment, namely
+ * requirements dictated by the accompanying DRM/(v)GPU drivers running in both
+ * host and guest environments, display buffers can be allocated by either
+ * frontend driver or backend.
+ */
+
+/**
+ * DOC: Buffers allocated by the frontend driver
+ *
+ * In this mode of operation the driver allocates buffers from system memory.
+ *
+ * Note! If used with accompanying DRM/(v)GPU drivers, this mode of operation
+ * may require IOMMU support on the platform, so that the accompanying
+ * DRM/vGPU hardware can still reach the display buffer memory while
+ * importing PRIME buffers from the frontend driver.
+ */
+
+/**
+ * DOC: Buffers allocated by the backend
+ *
+ * This mode of operation is run-time configured via guest domain configuration
+ * through XenStore entries.
+ *
+ * For systems which do not provide IOMMU support but have specific
+ * requirements for display buffers, it is possible to allocate such buffers
+ * on the backend side and share them with the frontend.
+ * For example, if the host domain is 1:1 mapped and has DRM/GPU hardware
+ * expecting physically contiguous memory, this allows implementing zero-copy
+ * use-cases.
+ *
+ * Note: when using this scenario the following should be considered:
+ *
+ * #. If the guest domain dies, pages/grants received from the backend
+ *    cannot be claimed back.
+ *
+ * #. A misbehaving guest may send too many requests to the
+ *    backend, exhausting its grant references and memory
+ *    (consider this from a security point of view).
+ */
+
+/**
+ * DOC: Driver limitations
+ *
+ * #. Only a primary plane without additional properties is supported.
+ *
+ * #. Only one video mode per connector is supported, and it is configured
+ *    via XenStore.
+ *
+ * #. All CRTCs operate at a fixed frequency of 60 Hz.
+ */
+
+/* timeout in ms to wait for backend to respond */
+#define XEN_DRM_FRONT_WAIT_BACK_MS 3000
+
+struct xen_drm_front_info {
+ struct xenbus_device *xb_dev;
+ struct xen_drm_front_drm_info *drm_info;
+
+ /* to protect data between backend IO code and interrupt handler */
+ spinlock_t io_lock;
+
+ int num_evt_pairs;
+ struct xen_drm_front_evtchnl_pair *evt_pairs;
+ struct xen_drm_front_cfg cfg;
+
+ /* display buffers */
+ struct list_head dbuf_list;
+};
+
+struct xen_drm_front_drm_pipeline {
+ struct xen_drm_front_drm_info *drm_info;
+
+ int index;
+
+ struct drm_simple_display_pipe pipe;
+
+ struct drm_connector conn;
+ /* These are only for connector mode checking */
+ int width, height;
+
+ struct drm_pending_vblank_event *pending_event;
+
+ struct delayed_work pflip_to_worker;
+
+ bool conn_connected;
+};
+
+struct xen_drm_front_drm_info {
+ struct xen_drm_front_info *front_info;
+ struct drm_device *drm_dev;
+
+ struct xen_drm_front_drm_pipeline pipeline[XEN_DRM_FRONT_MAX_CRTCS];
+};
+
+static inline u64 xen_drm_front_fb_to_cookie(struct drm_framebuffer *fb)
+{
+ return (uintptr_t)fb;
+}
+
+static inline u64 xen_drm_front_dbuf_to_cookie(struct drm_gem_object *gem_obj)
+{
+ return (uintptr_t)gem_obj;
+}
+
+int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
+ u32 x, u32 y, u32 width, u32 height,
+ u32 bpp, u64 fb_cookie);
+
+int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
+ u64 dbuf_cookie, u32 width, u32 height,
+ u32 bpp, u64 size, u32 offset, struct page **pages);
+
+int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
+ u64 dbuf_cookie, u64 fb_cookie, u32 width,
+ u32 height, u32 pixel_format);
+
+int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
+ u64 fb_cookie);
+
+int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
+ int conn_idx, u64 fb_cookie);
+
+void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
+ int conn_idx, u64 fb_cookie);
+
+void xen_drm_front_gem_object_free(struct drm_gem_object *obj);
+
+#endif /* __XEN_DRM_FRONT_H_ */
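
Not part of the patch: a minimal user-space sketch of the frontend-allocated buffer mode described in the DOC comments above, exercised through the standard DRM dumb-buffer UAPI that xen_drm_drv_dumb_create() services. The device node path and the UAPI header include path are assumptions and may differ between systems.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#include <drm/drm.h>

int main(void)
{
	struct drm_mode_create_dumb create = { .width = 1024, .height = 768, .bpp = 32 };
	struct drm_mode_map_dumb map = { 0 };
	struct drm_mode_destroy_dumb destroy = { 0 };
	void *fb;
	int fd;

	/* Assumption: the xendrm-du device is the first DRM node. */
	fd = open("/dev/dri/card0", O_RDWR | O_CLOEXEC);
	if (fd < 0)
		return 1;

	/* Allocate a dumb buffer; the driver shares its pages with the backend. */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create))
		goto out;

	/* Obtain the fake mmap offset and map the buffer for CPU access. */
	map.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map))
		goto out_destroy;

	fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	if (fb != MAP_FAILED) {
		memset(fb, 0, create.size);	/* clear the buffer to black */
		munmap(fb, create.size);
	}

out_destroy:
	destroy.handle = create.handle;
	ioctl(fd, DRM_IOCTL_MODE_DESTROY_DUMB, &destroy);
out:
	close(fd);
	return 0;
}

On the kernel side, DRM_IOCTL_MODE_CREATE_DUMB lands in xen_drm_drv_dumb_create(), which builds the GEM object, shares its pages with the backend via XENDISPL_OP_DBUF_CREATE, and only then installs the user-visible handle.
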
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.c b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
new file mode 100644
index 0000000000..ec53b9cc9e
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <linux/device.h>
+
+#include <drm/drm_print.h>
+
+#include <xen/interface/io/displif.h>
+#include <xen/xenbus.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_cfg.h"
+
+static int cfg_connector(struct xen_drm_front_info *front_info,
+ struct xen_drm_front_cfg_connector *connector,
+ const char *path, int index)
+{
+ char *connector_path;
+
+ connector_path = devm_kasprintf(&front_info->xb_dev->dev,
+ GFP_KERNEL, "%s/%d", path, index);
+ if (!connector_path)
+ return -ENOMEM;
+
+ if (xenbus_scanf(XBT_NIL, connector_path, XENDISPL_FIELD_RESOLUTION,
+ "%d" XENDISPL_RESOLUTION_SEPARATOR "%d",
+ &connector->width, &connector->height) < 0) {
+ /* either no entry configured or wrong resolution set */
+ connector->width = 0;
+ connector->height = 0;
+ return -EINVAL;
+ }
+
+ connector->xenstore_path = connector_path;
+
+ DRM_INFO("Connector %s: resolution %dx%d\n",
+ connector_path, connector->width, connector->height);
+ return 0;
+}
+
+int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info,
+ struct xen_drm_front_cfg *cfg)
+{
+ struct xenbus_device *xb_dev = front_info->xb_dev;
+ int ret, i;
+
+ if (xenbus_read_unsigned(front_info->xb_dev->nodename,
+ XENDISPL_FIELD_BE_ALLOC, 0)) {
+ DRM_INFO("Backend can provide display buffers\n");
+ cfg->be_alloc = true;
+ }
+
+ cfg->num_connectors = 0;
+ for (i = 0; i < ARRAY_SIZE(cfg->connectors); i++) {
+ ret = cfg_connector(front_info, &cfg->connectors[i],
+ xb_dev->nodename, i);
+ if (ret < 0)
+ break;
+ cfg->num_connectors++;
+ }
+
+ if (!cfg->num_connectors) {
+ DRM_ERROR("No connector(s) configured at %s\n",
+ xb_dev->nodename);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
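
Not part of the patch: an illustrative XenStore layout for a guest with a single connector, matching what xen_drm_front_cfg_card() and cfg_connector() read above. The frontend path is hypothetical (the toolstack assigns the actual nodename); the field names follow displif.h.

/local/domain/<domid>/device/vdispl/0/be-alloc = "0"
/local/domain/<domid>/device/vdispl/0/0/resolution = "1920x1080"
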
diff --git a/drivers/gpu/drm/xen/xen_drm_front_cfg.h b/drivers/gpu/drm/xen/xen_drm_front_cfg.h
new file mode 100644
index 0000000000..aa8490ba91
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_cfg.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_CFG_H_
+#define __XEN_DRM_FRONT_CFG_H_
+
+#include <linux/types.h>
+
+#define XEN_DRM_FRONT_MAX_CRTCS 4
+
+struct xen_drm_front_cfg_connector {
+ int width;
+ int height;
+ char *xenstore_path;
+};
+
+struct xen_drm_front_cfg {
+ struct xen_drm_front_info *front_info;
+ /* number of connectors in this configuration */
+ int num_connectors;
+ /* connector configurations */
+ struct xen_drm_front_cfg_connector connectors[XEN_DRM_FRONT_MAX_CRTCS];
+ /* set if dumb buffers are allocated externally on backend side */
+ bool be_alloc;
+};
+
+int xen_drm_front_cfg_card(struct xen_drm_front_info *front_info,
+ struct xen_drm_front_cfg *cfg);
+
+#endif /* __XEN_DRM_FRONT_CFG_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.c b/drivers/gpu/drm/xen/xen_drm_front_conn.c
new file mode 100644
index 0000000000..a1ba6d3d05
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.c
@@ -0,0 +1,117 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_probe_helper.h>
+
+#include <video/videomode.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_conn.h"
+#include "xen_drm_front_kms.h"
+
+static struct xen_drm_front_drm_pipeline *
+to_xen_drm_pipeline(struct drm_connector *connector)
+{
+ return container_of(connector, struct xen_drm_front_drm_pipeline, conn);
+}
+
+static const u32 plane_formats[] = {
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_YUYV,
+};
+
+const u32 *xen_drm_front_conn_get_formats(int *format_count)
+{
+ *format_count = ARRAY_SIZE(plane_formats);
+ return plane_formats;
+}
+
+static int connector_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force)
+{
+ struct xen_drm_front_drm_pipeline *pipeline =
+ to_xen_drm_pipeline(connector);
+
+ if (drm_dev_is_unplugged(connector->dev))
+ pipeline->conn_connected = false;
+
+ return pipeline->conn_connected ? connector_status_connected :
+ connector_status_disconnected;
+}
+
+#define XEN_DRM_CRTC_VREFRESH_HZ 60
+
+static int connector_get_modes(struct drm_connector *connector)
+{
+ struct xen_drm_front_drm_pipeline *pipeline =
+ to_xen_drm_pipeline(connector);
+ struct drm_display_mode *mode;
+ struct videomode videomode;
+ int width, height;
+
+ mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return 0;
+
+ memset(&videomode, 0, sizeof(videomode));
+ videomode.hactive = pipeline->width;
+ videomode.vactive = pipeline->height;
+ width = videomode.hactive + videomode.hfront_porch +
+ videomode.hback_porch + videomode.hsync_len;
+ height = videomode.vactive + videomode.vfront_porch +
+ videomode.vback_porch + videomode.vsync_len;
+ videomode.pixelclock = width * height * XEN_DRM_CRTC_VREFRESH_HZ;
+ mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
+
+ drm_display_mode_from_videomode(&videomode, mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
+static const struct drm_connector_helper_funcs connector_helper_funcs = {
+ .get_modes = connector_get_modes,
+ .detect_ctx = connector_detect,
+};
+
+static const struct drm_connector_funcs connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
+ struct drm_connector *connector)
+{
+ struct xen_drm_front_drm_pipeline *pipeline =
+ to_xen_drm_pipeline(connector);
+
+ drm_connector_helper_add(connector, &connector_helper_funcs);
+
+ pipeline->conn_connected = true;
+
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return drm_connector_init(drm_info->drm_dev, connector,
+ &connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_conn.h b/drivers/gpu/drm/xen/xen_drm_front_conn.h
new file mode 100644
index 0000000000..e5f4314899
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_conn.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_CONN_H_
+#define __XEN_DRM_FRONT_CONN_H_
+
+#include <linux/types.h>
+
+struct drm_connector;
+struct xen_drm_front_drm_info;
+
+
+int xen_drm_front_conn_init(struct xen_drm_front_drm_info *drm_info,
+ struct drm_connector *connector);
+
+const u32 *xen_drm_front_conn_get_formats(int *format_count);
+
+#endif /* __XEN_DRM_FRONT_CONN_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
new file mode 100644
index 0000000000..e52afd7923
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.c
@@ -0,0 +1,366 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/irq.h>
+
+#include <drm/drm_print.h>
+
+#include <xen/xenbus.h>
+#include <xen/events.h>
+#include <xen/grant_table.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_evtchnl.h"
+
+static irqreturn_t evtchnl_interrupt_ctrl(int irq, void *dev_id)
+{
+ struct xen_drm_front_evtchnl *evtchnl = dev_id;
+ struct xen_drm_front_info *front_info = evtchnl->front_info;
+ struct xendispl_resp *resp;
+ RING_IDX i, rp;
+ unsigned long flags;
+
+ if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+
+again:
+ rp = evtchnl->u.req.ring.sring->rsp_prod;
+ /* ensure we see queued responses up to rp */
+ virt_rmb();
+
+ for (i = evtchnl->u.req.ring.rsp_cons; i != rp; i++) {
+ resp = RING_GET_RESPONSE(&evtchnl->u.req.ring, i);
+ if (unlikely(resp->id != evtchnl->evt_id))
+ continue;
+
+ switch (resp->operation) {
+ case XENDISPL_OP_PG_FLIP:
+ case XENDISPL_OP_FB_ATTACH:
+ case XENDISPL_OP_FB_DETACH:
+ case XENDISPL_OP_DBUF_CREATE:
+ case XENDISPL_OP_DBUF_DESTROY:
+ case XENDISPL_OP_SET_CONFIG:
+ evtchnl->u.req.resp_status = resp->status;
+ complete(&evtchnl->u.req.completion);
+ break;
+
+ default:
+ DRM_ERROR("Operation %d is not supported\n",
+ resp->operation);
+ break;
+ }
+ }
+
+ evtchnl->u.req.ring.rsp_cons = i;
+
+ if (i != evtchnl->u.req.ring.req_prod_pvt) {
+ int more_to_do;
+
+ RING_FINAL_CHECK_FOR_RESPONSES(&evtchnl->u.req.ring,
+ more_to_do);
+ if (more_to_do)
+ goto again;
+ } else {
+ evtchnl->u.req.ring.sring->rsp_event = i + 1;
+ }
+
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t evtchnl_interrupt_evt(int irq, void *dev_id)
+{
+ struct xen_drm_front_evtchnl *evtchnl = dev_id;
+ struct xen_drm_front_info *front_info = evtchnl->front_info;
+ struct xendispl_event_page *page = evtchnl->u.evt.page;
+ u32 cons, prod;
+ unsigned long flags;
+
+ if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
+ return IRQ_HANDLED;
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+
+ prod = page->in_prod;
+ /* ensure we see ring contents up to prod */
+ virt_rmb();
+ if (prod == page->in_cons)
+ goto out;
+
+ for (cons = page->in_cons; cons != prod; cons++) {
+ struct xendispl_evt *event;
+
+ event = &XENDISPL_IN_RING_REF(page, cons);
+ if (unlikely(event->id != evtchnl->evt_id++))
+ continue;
+
+ switch (event->type) {
+ case XENDISPL_EVT_PG_FLIP:
+ xen_drm_front_on_frame_done(front_info, evtchnl->index,
+ event->op.pg_flip.fb_cookie);
+ break;
+ }
+ }
+ page->in_cons = cons;
+ /* ensure ring contents */
+ virt_wmb();
+
+out:
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+ return IRQ_HANDLED;
+}
+
+static void evtchnl_free(struct xen_drm_front_info *front_info,
+ struct xen_drm_front_evtchnl *evtchnl)
+{
+ void *page = NULL;
+
+ if (evtchnl->type == EVTCHNL_TYPE_REQ)
+ page = evtchnl->u.req.ring.sring;
+ else if (evtchnl->type == EVTCHNL_TYPE_EVT)
+ page = evtchnl->u.evt.page;
+ if (!page)
+ return;
+
+ evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
+
+ if (evtchnl->type == EVTCHNL_TYPE_REQ) {
+		/* release anyone still waiting for a response, if any */
+ evtchnl->u.req.resp_status = -EIO;
+ complete_all(&evtchnl->u.req.completion);
+ }
+
+ if (evtchnl->irq)
+ unbind_from_irqhandler(evtchnl->irq, evtchnl);
+
+ if (evtchnl->port)
+ xenbus_free_evtchn(front_info->xb_dev, evtchnl->port);
+
+ /* end access and free the page */
+ xenbus_teardown_ring(&page, 1, &evtchnl->gref);
+
+ memset(evtchnl, 0, sizeof(*evtchnl));
+}
+
+static int evtchnl_alloc(struct xen_drm_front_info *front_info, int index,
+ struct xen_drm_front_evtchnl *evtchnl,
+ enum xen_drm_front_evtchnl_type type)
+{
+ struct xenbus_device *xb_dev = front_info->xb_dev;
+ void *page;
+ irq_handler_t handler;
+ int ret;
+
+ memset(evtchnl, 0, sizeof(*evtchnl));
+ evtchnl->type = type;
+ evtchnl->index = index;
+ evtchnl->front_info = front_info;
+ evtchnl->state = EVTCHNL_STATE_DISCONNECTED;
+
+ ret = xenbus_setup_ring(xb_dev, GFP_NOIO | __GFP_HIGH, &page,
+ 1, &evtchnl->gref);
+ if (ret)
+ goto fail;
+
+ if (type == EVTCHNL_TYPE_REQ) {
+ struct xen_displif_sring *sring;
+
+ init_completion(&evtchnl->u.req.completion);
+ mutex_init(&evtchnl->u.req.req_io_lock);
+ sring = page;
+ XEN_FRONT_RING_INIT(&evtchnl->u.req.ring, sring, XEN_PAGE_SIZE);
+
+ handler = evtchnl_interrupt_ctrl;
+ } else {
+ evtchnl->u.evt.page = page;
+ handler = evtchnl_interrupt_evt;
+ }
+
+ ret = xenbus_alloc_evtchn(xb_dev, &evtchnl->port);
+ if (ret < 0)
+ goto fail;
+
+ ret = bind_evtchn_to_irqhandler(evtchnl->port,
+ handler, 0, xb_dev->devicetype,
+ evtchnl);
+ if (ret < 0)
+ goto fail;
+
+ evtchnl->irq = ret;
+ return 0;
+
+fail:
+ DRM_ERROR("Failed to allocate ring: %d\n", ret);
+ return ret;
+}
+
+int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info)
+{
+ struct xen_drm_front_cfg *cfg;
+ int ret, conn;
+
+ cfg = &front_info->cfg;
+
+ front_info->evt_pairs =
+ kcalloc(cfg->num_connectors,
+ sizeof(struct xen_drm_front_evtchnl_pair),
+ GFP_KERNEL);
+ if (!front_info->evt_pairs) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ for (conn = 0; conn < cfg->num_connectors; conn++) {
+ ret = evtchnl_alloc(front_info, conn,
+ &front_info->evt_pairs[conn].req,
+ EVTCHNL_TYPE_REQ);
+ if (ret < 0) {
+ DRM_ERROR("Error allocating control channel\n");
+ goto fail;
+ }
+
+ ret = evtchnl_alloc(front_info, conn,
+ &front_info->evt_pairs[conn].evt,
+ EVTCHNL_TYPE_EVT);
+ if (ret < 0) {
+ DRM_ERROR("Error allocating in-event channel\n");
+ goto fail;
+ }
+ }
+ front_info->num_evt_pairs = cfg->num_connectors;
+ return 0;
+
+fail:
+ xen_drm_front_evtchnl_free_all(front_info);
+ return ret;
+}
+
+static int evtchnl_publish(struct xenbus_transaction xbt,
+ struct xen_drm_front_evtchnl *evtchnl,
+ const char *path, const char *node_ring,
+ const char *node_chnl)
+{
+ struct xenbus_device *xb_dev = evtchnl->front_info->xb_dev;
+ int ret;
+
+ /* write control channel ring reference */
+ ret = xenbus_printf(xbt, path, node_ring, "%u", evtchnl->gref);
+ if (ret < 0) {
+ xenbus_dev_error(xb_dev, ret, "writing ring-ref");
+ return ret;
+ }
+
+ /* write event channel ring reference */
+ ret = xenbus_printf(xbt, path, node_chnl, "%u", evtchnl->port);
+ if (ret < 0) {
+ xenbus_dev_error(xb_dev, ret, "writing event channel");
+ return ret;
+ }
+
+ return 0;
+}
+
+int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info)
+{
+ struct xenbus_transaction xbt;
+ struct xen_drm_front_cfg *plat_data;
+ int ret, conn;
+
+ plat_data = &front_info->cfg;
+
+again:
+ ret = xenbus_transaction_start(&xbt);
+ if (ret < 0) {
+ xenbus_dev_fatal(front_info->xb_dev, ret,
+ "starting transaction");
+ return ret;
+ }
+
+ for (conn = 0; conn < plat_data->num_connectors; conn++) {
+ ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].req,
+ plat_data->connectors[conn].xenstore_path,
+ XENDISPL_FIELD_REQ_RING_REF,
+ XENDISPL_FIELD_REQ_CHANNEL);
+ if (ret < 0)
+ goto fail;
+
+ ret = evtchnl_publish(xbt, &front_info->evt_pairs[conn].evt,
+ plat_data->connectors[conn].xenstore_path,
+ XENDISPL_FIELD_EVT_RING_REF,
+ XENDISPL_FIELD_EVT_CHANNEL);
+ if (ret < 0)
+ goto fail;
+ }
+
+ ret = xenbus_transaction_end(xbt, 0);
+ if (ret < 0) {
+ if (ret == -EAGAIN)
+ goto again;
+
+ xenbus_dev_fatal(front_info->xb_dev, ret,
+ "completing transaction");
+ goto fail_to_end;
+ }
+
+ return 0;
+
+fail:
+ xenbus_transaction_end(xbt, 1);
+
+fail_to_end:
+ xenbus_dev_fatal(front_info->xb_dev, ret, "writing Xen store");
+ return ret;
+}
+
+void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl)
+{
+ int notify;
+
+ evtchnl->u.req.ring.req_prod_pvt++;
+ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&evtchnl->u.req.ring, notify);
+ if (notify)
+ notify_remote_via_irq(evtchnl->irq);
+}
+
+void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
+ enum xen_drm_front_evtchnl_state state)
+{
+ unsigned long flags;
+ int i;
+
+ if (!front_info->evt_pairs)
+ return;
+
+ spin_lock_irqsave(&front_info->io_lock, flags);
+ for (i = 0; i < front_info->num_evt_pairs; i++) {
+ front_info->evt_pairs[i].req.state = state;
+ front_info->evt_pairs[i].evt.state = state;
+ }
+ spin_unlock_irqrestore(&front_info->io_lock, flags);
+}
+
+void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info)
+{
+ int i;
+
+ if (!front_info->evt_pairs)
+ return;
+
+ for (i = 0; i < front_info->num_evt_pairs; i++) {
+ evtchnl_free(front_info, &front_info->evt_pairs[i].req);
+ evtchnl_free(front_info, &front_info->evt_pairs[i].evt);
+ }
+
+ kfree(front_info->evt_pairs);
+ front_info->evt_pairs = NULL;
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h
new file mode 100644
index 0000000000..b0af699433
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_evtchnl.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_EVTCHNL_H_
+#define __XEN_DRM_FRONT_EVTCHNL_H_
+
+#include <linux/completion.h>
+#include <linux/types.h>
+
+#include <xen/interface/io/ring.h>
+#include <xen/interface/io/displif.h>
+
+/*
+ * All operations which are not connector oriented use this ctrl event channel,
+ * e.g. fb_attach/destroy which belong to a DRM device, not to a CRTC.
+ */
+#define GENERIC_OP_EVT_CHNL 0
+
+enum xen_drm_front_evtchnl_state {
+ EVTCHNL_STATE_DISCONNECTED,
+ EVTCHNL_STATE_CONNECTED,
+};
+
+enum xen_drm_front_evtchnl_type {
+ EVTCHNL_TYPE_REQ,
+ EVTCHNL_TYPE_EVT,
+};
+
+struct xen_drm_front_drm_info;
+
+struct xen_drm_front_evtchnl {
+ struct xen_drm_front_info *front_info;
+ int gref;
+ int port;
+ int irq;
+ int index;
+ enum xen_drm_front_evtchnl_state state;
+ enum xen_drm_front_evtchnl_type type;
+ /* either response id or incoming event id */
+ u16 evt_id;
+ /* next request id or next expected event id */
+ u16 evt_next_id;
+ union {
+ struct {
+ struct xen_displif_front_ring ring;
+ struct completion completion;
+ /* latest response status */
+ int resp_status;
+ /* serializer for backend IO: request/response */
+ struct mutex req_io_lock;
+ } req;
+ struct {
+ struct xendispl_event_page *page;
+ } evt;
+ } u;
+};
+
+struct xen_drm_front_evtchnl_pair {
+ struct xen_drm_front_evtchnl req;
+ struct xen_drm_front_evtchnl evt;
+};
+
+int xen_drm_front_evtchnl_create_all(struct xen_drm_front_info *front_info);
+
+int xen_drm_front_evtchnl_publish_all(struct xen_drm_front_info *front_info);
+
+void xen_drm_front_evtchnl_flush(struct xen_drm_front_evtchnl *evtchnl);
+
+void xen_drm_front_evtchnl_set_state(struct xen_drm_front_info *front_info,
+ enum xen_drm_front_evtchnl_state state);
+
+void xen_drm_front_evtchnl_free_all(struct xen_drm_front_info *front_info);
+
+#endif /* __XEN_DRM_FRONT_EVTCHNL_H_ */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
new file mode 100644
index 0000000000..3ad2b4cfd1
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/scatterlist.h>
+#include <linux/shmem_fs.h>
+
+#include <drm/drm_gem.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_probe_helper.h>
+
+#include <xen/balloon.h>
+#include <xen/xen.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_gem.h"
+
+struct xen_gem_object {
+ struct drm_gem_object base;
+
+ size_t num_pages;
+ struct page **pages;
+
+ /* set for buffers allocated by the backend */
+ bool be_alloc;
+
+ /* this is for imported PRIME buffer */
+ struct sg_table *sgt_imported;
+};
+
+static inline struct xen_gem_object *
+to_xen_gem_obj(struct drm_gem_object *gem_obj)
+{
+ return container_of(gem_obj, struct xen_gem_object, base);
+}
+
+static int gem_alloc_pages_array(struct xen_gem_object *xen_obj,
+ size_t buf_size)
+{
+ xen_obj->num_pages = DIV_ROUND_UP(buf_size, PAGE_SIZE);
+ xen_obj->pages = kvmalloc_array(xen_obj->num_pages,
+ sizeof(struct page *), GFP_KERNEL);
+ return !xen_obj->pages ? -ENOMEM : 0;
+}
+
+static void gem_free_pages_array(struct xen_gem_object *xen_obj)
+{
+ kvfree(xen_obj->pages);
+ xen_obj->pages = NULL;
+}
+
+static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj,
+ struct vm_area_struct *vma)
+{
+ struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+ int ret;
+
+ vma->vm_ops = gem_obj->funcs->vm_ops;
+
+ /*
+ * Clear the VM_PFNMAP flag that was set by drm_gem_mmap(), and set the
+ * vm_pgoff (used as a fake buffer offset by DRM) to 0 as we want to map
+ * the whole buffer.
+ */
+ vm_flags_mod(vma, VM_MIXEDMAP | VM_DONTEXPAND, VM_PFNMAP);
+ vma->vm_pgoff = 0;
+
+ /*
+ * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
+ * all memory which is shared with other entities in the system
+ * (including the hypervisor and other guests) must reside in memory
+ * which is mapped as Normal Inner Write-Back Outer Write-Back
+ * Inner-Shareable.
+ */
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+
+ /*
+	 * The vm_operations_struct.fault handler would be called on first
+	 * CPU access to the VMA. For GPUs this isn't the case, because the
+	 * CPU doesn't touch the memory. Insert the pages now, so both CPU
+	 * and GPU are happy.
+	 *
+	 * FIXME: as we insert all the pages now, no .fault handler should
+	 * be called, so don't provide one.
+ */
+ ret = vm_map_pages(vma, xen_obj->pages, xen_obj->num_pages);
+ if (ret < 0)
+ DRM_ERROR("Failed to map pages into vma: %d\n", ret);
+
+ return ret;
+}
+
+static const struct vm_operations_struct xen_drm_drv_vm_ops = {
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+static const struct drm_gem_object_funcs xen_drm_front_gem_object_funcs = {
+ .free = xen_drm_front_gem_object_free,
+ .get_sg_table = xen_drm_front_gem_get_sg_table,
+ .vmap = xen_drm_front_gem_prime_vmap,
+ .vunmap = xen_drm_front_gem_prime_vunmap,
+ .mmap = xen_drm_front_gem_object_mmap,
+ .vm_ops = &xen_drm_drv_vm_ops,
+};
+
+static struct xen_gem_object *gem_create_obj(struct drm_device *dev,
+ size_t size)
+{
+ struct xen_gem_object *xen_obj;
+ int ret;
+
+ xen_obj = kzalloc(sizeof(*xen_obj), GFP_KERNEL);
+ if (!xen_obj)
+ return ERR_PTR(-ENOMEM);
+
+ xen_obj->base.funcs = &xen_drm_front_gem_object_funcs;
+
+ ret = drm_gem_object_init(dev, &xen_obj->base, size);
+ if (ret < 0) {
+ kfree(xen_obj);
+ return ERR_PTR(ret);
+ }
+
+ return xen_obj;
+}
+
+static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size)
+{
+ struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+ struct xen_gem_object *xen_obj;
+ int ret;
+
+ size = round_up(size, PAGE_SIZE);
+ xen_obj = gem_create_obj(dev, size);
+ if (IS_ERR(xen_obj))
+ return xen_obj;
+
+ if (drm_info->front_info->cfg.be_alloc) {
+ /*
+ * backend will allocate space for this buffer, so
+ * only allocate array of pointers to pages
+ */
+ ret = gem_alloc_pages_array(xen_obj, size);
+ if (ret < 0)
+ goto fail;
+
+ /*
+ * allocate ballooned pages which will be used to map
+ * grant references provided by the backend
+ */
+ ret = xen_alloc_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
+ if (ret < 0) {
+ DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n",
+ xen_obj->num_pages, ret);
+ gem_free_pages_array(xen_obj);
+ goto fail;
+ }
+
+ xen_obj->be_alloc = true;
+ return xen_obj;
+ }
+ /*
+ * need to allocate backing pages now, so we can share those
+ * with the backend
+ */
+ xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
+ xen_obj->pages = drm_gem_get_pages(&xen_obj->base);
+ if (IS_ERR(xen_obj->pages)) {
+ ret = PTR_ERR(xen_obj->pages);
+ xen_obj->pages = NULL;
+ goto fail;
+ }
+
+ return xen_obj;
+
+fail:
+ DRM_ERROR("Failed to allocate buffer with size %zu\n", size);
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+ size_t size)
+{
+ struct xen_gem_object *xen_obj;
+
+ xen_obj = gem_create(dev, size);
+ if (IS_ERR(xen_obj))
+ return ERR_CAST(xen_obj);
+
+ return &xen_obj->base;
+}
+
+void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj)
+{
+ struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+ if (xen_obj->base.import_attach) {
+ drm_prime_gem_destroy(&xen_obj->base, xen_obj->sgt_imported);
+ gem_free_pages_array(xen_obj);
+ } else {
+ if (xen_obj->pages) {
+ if (xen_obj->be_alloc) {
+ xen_free_unpopulated_pages(xen_obj->num_pages,
+ xen_obj->pages);
+ gem_free_pages_array(xen_obj);
+ } else {
+ drm_gem_put_pages(&xen_obj->base,
+ xen_obj->pages, true, false);
+ }
+ }
+ }
+ drm_gem_object_release(gem_obj);
+ kfree(xen_obj);
+}
+
+struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *gem_obj)
+{
+ struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+ return xen_obj->pages;
+}
+
+struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
+{
+ struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+
+ if (!xen_obj->pages)
+ return ERR_PTR(-ENOMEM);
+
+ return drm_prime_pages_to_sg(gem_obj->dev,
+ xen_obj->pages, xen_obj->num_pages);
+}
+
+struct drm_gem_object *
+xen_drm_front_gem_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt)
+{
+ struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+ struct xen_gem_object *xen_obj;
+ size_t size;
+ int ret;
+
+ size = attach->dmabuf->size;
+ xen_obj = gem_create_obj(dev, size);
+ if (IS_ERR(xen_obj))
+ return ERR_CAST(xen_obj);
+
+ ret = gem_alloc_pages_array(xen_obj, size);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ xen_obj->sgt_imported = sgt;
+
+ ret = drm_prime_sg_to_page_array(sgt, xen_obj->pages,
+ xen_obj->num_pages);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ ret = xen_drm_front_dbuf_create(drm_info->front_info,
+ xen_drm_front_dbuf_to_cookie(&xen_obj->base),
+ 0, 0, 0, size, sgt->sgl->offset,
+ xen_obj->pages);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ DRM_DEBUG("Imported buffer of size %zu with nents %u\n",
+ size, sgt->orig_nents);
+
+ return &xen_obj->base;
+}
+
+int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
+ struct iosys_map *map)
+{
+ struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
+ void *vaddr;
+
+ if (!xen_obj->pages)
+ return -ENOMEM;
+
+	/* See the comment in xen_drm_front_gem_object_mmap() on mapping and attributes. */
+ vaddr = vmap(xen_obj->pages, xen_obj->num_pages,
+ VM_MAP, PAGE_KERNEL);
+ if (!vaddr)
+ return -ENOMEM;
+ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
+}
+
+void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
+ struct iosys_map *map)
+{
+ vunmap(map->vaddr);
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.h b/drivers/gpu/drm/xen/xen_drm_front_gem.h
new file mode 100644
index 0000000000..a718a1f382
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_GEM_H
+#define __XEN_DRM_FRONT_GEM_H
+
+struct dma_buf_attachment;
+struct iosys_map;
+struct drm_device;
+struct drm_gem_object;
+struct sg_table;
+
+struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev,
+ size_t size);
+
+struct drm_gem_object *
+xen_drm_front_gem_import_sg_table(struct drm_device *dev,
+ struct dma_buf_attachment *attach,
+ struct sg_table *sgt);
+
+struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj);
+
+struct page **xen_drm_front_gem_get_pages(struct drm_gem_object *obj);
+
+void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj);
+
+int xen_drm_front_gem_prime_vmap(struct drm_gem_object *gem_obj,
+ struct iosys_map *map);
+
+void xen_drm_front_gem_prime_vunmap(struct drm_gem_object *gem_obj,
+ struct iosys_map *map);
+
+#endif /* __XEN_DRM_FRONT_GEM_H */
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
new file mode 100644
index 0000000000..dfa78a49a6
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -0,0 +1,381 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "xen_drm_front.h"
+#include "xen_drm_front_conn.h"
+#include "xen_drm_front_kms.h"
+
+/*
+ * Timeout in ms to wait for the frame done event from the backend:
+ * must be a bit longer than the IO timeout.
+ */
+#define FRAME_DONE_TO_MS (XEN_DRM_FRONT_WAIT_BACK_MS + 100)
+
+static struct xen_drm_front_drm_pipeline *
+to_xen_drm_pipeline(struct drm_simple_display_pipe *pipe)
+{
+ return container_of(pipe, struct xen_drm_front_drm_pipeline, pipe);
+}
+
+static void fb_destroy(struct drm_framebuffer *fb)
+{
+ struct xen_drm_front_drm_info *drm_info = fb->dev->dev_private;
+ int idx;
+
+ if (drm_dev_enter(fb->dev, &idx)) {
+ xen_drm_front_fb_detach(drm_info->front_info,
+ xen_drm_front_fb_to_cookie(fb));
+ drm_dev_exit(idx);
+ }
+ drm_gem_fb_destroy(fb);
+}
+
+static const struct drm_framebuffer_funcs fb_funcs = {
+ .destroy = fb_destroy,
+};
+
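+/*
+ * Create a GEM-backed framebuffer and attach it to the backend; on
+ * attach failure the framebuffer is destroyed and an error is returned.
+ */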
+static struct drm_framebuffer *
+fb_create(struct drm_device *dev, struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct xen_drm_front_drm_info *drm_info = dev->dev_private;
+ struct drm_framebuffer *fb;
+ struct drm_gem_object *gem_obj;
+ int ret;
+
+ fb = drm_gem_fb_create_with_funcs(dev, filp, mode_cmd, &fb_funcs);
+ if (IS_ERR(fb))
+ return fb;
+
+ gem_obj = fb->obj[0];
+
+ ret = xen_drm_front_fb_attach(drm_info->front_info,
+ xen_drm_front_dbuf_to_cookie(gem_obj),
+ xen_drm_front_fb_to_cookie(fb),
+ fb->width, fb->height,
+ fb->format->format);
+ if (ret < 0) {
+		DRM_ERROR("Backend failed to attach FB %p: %d\n", fb, ret);
+ goto fail;
+ }
+
+ return fb;
+
+fail:
+ drm_gem_fb_destroy(fb);
+ return ERR_PTR(ret);
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = fb_create,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
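+/*
+ * Deliver the cached page flip/vblank completion event, if any, while
+ * holding dev->event_lock.
+ */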
+static void send_pending_event(struct xen_drm_front_drm_pipeline *pipeline)
+{
+ struct drm_crtc *crtc = &pipeline->pipe.crtc;
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (pipeline->pending_event)
+ drm_crtc_send_vblank_event(crtc, pipeline->pending_event);
+ pipeline->pending_event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
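+/*
+ * Enable the display by requesting the backend to set the mode for the
+ * framebuffer bound to this pipeline; on failure the connector is
+ * marked as disconnected.
+ */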
+static void display_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct xen_drm_front_drm_pipeline *pipeline =
+ to_xen_drm_pipeline(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_framebuffer *fb = plane_state->fb;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
+ ret = xen_drm_front_mode_set(pipeline, crtc->x, crtc->y,
+ fb->width, fb->height,
+ fb->format->cpp[0] * 8,
+ xen_drm_front_fb_to_cookie(fb));
+
+ if (ret) {
+ DRM_ERROR("Failed to enable display: %d\n", ret);
+ pipeline->conn_connected = false;
+ }
+
+ drm_dev_exit(idx);
+}
+
+static void display_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct xen_drm_front_drm_pipeline *pipeline =
+ to_xen_drm_pipeline(pipe);
+ int ret = 0, idx;
+
+ if (drm_dev_enter(pipe->crtc.dev, &idx)) {
+ ret = xen_drm_front_mode_set(pipeline, 0, 0, 0, 0, 0,
+ xen_drm_front_fb_to_cookie(NULL));
+ drm_dev_exit(idx);
+ }
+ if (ret)
+ DRM_ERROR("Failed to disable display: %d\n", ret);
+
+	/* Make sure we can restart with the connector enabled next time */
+ pipeline->conn_connected = true;
+
+ /* release stalled event if any */
+ send_pending_event(pipeline);
+}
+
+void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
+ u64 fb_cookie)
+{
+ /*
+ * This runs in interrupt context, e.g. under
+	 * drm_info->front_info->io_lock, so we cannot call the _sync
+	 * version to cancel the work.
+ */
+ cancel_delayed_work(&pipeline->pflip_to_worker);
+
+ send_pending_event(pipeline);
+}
+
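+/*
+ * Delayed work which fires if the backend does not signal frame done
+ * within FRAME_DONE_TO_MS: release the stalled event so user-space is
+ * not blocked forever.
+ */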
+static void pflip_to_worker(struct work_struct *work)
+{
+ struct delayed_work *delayed_work = to_delayed_work(work);
+ struct xen_drm_front_drm_pipeline *pipeline =
+ container_of(delayed_work,
+ struct xen_drm_front_drm_pipeline,
+ pflip_to_worker);
+
+	DRM_ERROR("Frame done timed out, releasing pending event\n");
+ send_pending_event(pipeline);
+}
+
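+/*
+ * Send a page flip request to the backend if this atomic update is a
+ * real flip, i.e. both the old and the new plane states carry a
+ * framebuffer. Returns true if the request was sent (the pending event
+ * is then delivered on the backend's frame done event) and false if the
+ * caller must deliver the event itself.
+ */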
+static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_plane_state)
+{
+ struct drm_plane_state *plane_state =
+ drm_atomic_get_new_plane_state(old_plane_state->state,
+ &pipe->plane);
+
+ /*
+	 * If old_plane_state->fb is NULL and plane_state->fb is not,
+	 * then this is an atomic commit which will enable the display.
+	 * If old_plane_state->fb is not NULL and plane_state->fb is NULL,
+	 * then this is an atomic commit which will disable the display.
+	 * Ignore both cases and do not send a page flip, as this framebuffer
+	 * will be sent to the backend as part of the display_set_config call.
+ */
+ if (old_plane_state->fb && plane_state->fb) {
+ struct xen_drm_front_drm_pipeline *pipeline =
+ to_xen_drm_pipeline(pipe);
+ struct xen_drm_front_drm_info *drm_info = pipeline->drm_info;
+ int ret;
+
+ schedule_delayed_work(&pipeline->pflip_to_worker,
+ msecs_to_jiffies(FRAME_DONE_TO_MS));
+
+ ret = xen_drm_front_page_flip(drm_info->front_info,
+ pipeline->index,
+ xen_drm_front_fb_to_cookie(plane_state->fb));
+ if (ret) {
+ DRM_ERROR("Failed to send page flip request to backend: %d\n", ret);
+
+ pipeline->conn_connected = false;
+ /*
+			 * Report the flip as not handled, so the pending
+			 * event is sent, unblocking user-space.
+ */
+ return false;
+ }
+ /*
+		 * Signal that the page flip was handled; the pending event
+		 * will be sent on the frame done event from the backend.
+ */
+ return true;
+ }
+
+ return false;
+}
+
+static int display_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ /*
+ * Xen doesn't initialize vblanking via drm_vblank_init(), so
+ * DRM helpers assume that it doesn't handle vblanking and start
+ * sending out fake VBLANK events automatically.
+ *
+	 * As xen contains its own logic for sending out VBLANK events
+ * in send_pending_event(), disable no_vblank (i.e., the xen
+ * driver has vblanking support).
+ */
+ crtc_state->no_vblank = false;
+
+ return 0;
+}
+
+static void display_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_plane_state)
+{
+ struct xen_drm_front_drm_pipeline *pipeline =
+ to_xen_drm_pipeline(pipe);
+ struct drm_crtc *crtc = &pipe->crtc;
+ struct drm_pending_vblank_event *event;
+ int idx;
+
+ event = crtc->state->event;
+ if (event) {
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ WARN_ON(pipeline->pending_event);
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ crtc->state->event = NULL;
+
+ pipeline->pending_event = event;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx)) {
+ send_pending_event(pipeline);
+ return;
+ }
+
+ /*
+	 * Send the page flip request to the backend *after* the event has
+	 * been cached above, so that when the page flip done event arrives
+	 * from the backend we can deliver it without a race between this
+	 * code and the backend's event.
+	 * If this is not a page flip, i.e. no flip done event is expected
+	 * from the backend, then send the event now.
+ */
+ if (!display_send_page_flip(pipe, old_plane_state))
+ send_pending_event(pipeline);
+
+ drm_dev_exit(idx);
+}
+
+static enum drm_mode_status
+display_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+	struct xen_drm_front_drm_pipeline *pipeline =
+		to_xen_drm_pipeline(pipe);
+
+ if (mode->hdisplay != pipeline->width)
+ return MODE_ERROR;
+
+ if (mode->vdisplay != pipeline->height)
+ return MODE_ERROR;
+
+ return MODE_OK;
+}
+
+static const struct drm_simple_display_pipe_funcs display_funcs = {
+ .mode_valid = display_mode_valid,
+ .enable = display_enable,
+ .disable = display_disable,
+ .check = display_check,
+ .update = display_update,
+};
+
+static int display_pipe_init(struct xen_drm_front_drm_info *drm_info,
+ int index, struct xen_drm_front_cfg_connector *cfg,
+ struct xen_drm_front_drm_pipeline *pipeline)
+{
+ struct drm_device *dev = drm_info->drm_dev;
+ const u32 *formats;
+ int format_count;
+ int ret;
+
+ pipeline->drm_info = drm_info;
+ pipeline->index = index;
+ pipeline->height = cfg->height;
+ pipeline->width = cfg->width;
+
+ INIT_DELAYED_WORK(&pipeline->pflip_to_worker, pflip_to_worker);
+
+ ret = xen_drm_front_conn_init(drm_info, &pipeline->conn);
+ if (ret)
+ return ret;
+
+ formats = xen_drm_front_conn_get_formats(&format_count);
+
+ return drm_simple_display_pipe_init(dev, &pipeline->pipe,
+ &display_funcs, formats,
+ format_count, NULL,
+ &pipeline->conn);
+}
+
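+/*
+ * Initialize the DRM mode configuration and create one simple display
+ * pipe (plane, CRTC and encoder) per configured connector.
+ */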
+int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info)
+{
+ struct drm_device *dev = drm_info->drm_dev;
+ int i, ret;
+
+ drm_mode_config_init(dev);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ dev->mode_config.max_width = 4095;
+ dev->mode_config.max_height = 2047;
+ dev->mode_config.funcs = &mode_config_funcs;
+
+ for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
+ struct xen_drm_front_cfg_connector *cfg =
+ &drm_info->front_info->cfg.connectors[i];
+ struct xen_drm_front_drm_pipeline *pipeline =
+ &drm_info->pipeline[i];
+
+ ret = display_pipe_init(drm_info, i, cfg, pipeline);
+ if (ret) {
+ drm_mode_config_cleanup(dev);
+ return ret;
+ }
+ }
+
+ drm_mode_config_reset(dev);
+ drm_kms_helper_poll_init(dev);
+ return 0;
+}
+
+void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info)
+{
+ int i;
+
+ for (i = 0; i < drm_info->front_info->cfg.num_connectors; i++) {
+ struct xen_drm_front_drm_pipeline *pipeline =
+ &drm_info->pipeline[i];
+
+ cancel_delayed_work_sync(&pipeline->pflip_to_worker);
+
+ send_pending_event(pipeline);
+ }
+}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.h b/drivers/gpu/drm/xen/xen_drm_front_kms.h
new file mode 100644
index 0000000000..ab2fbad4fb
--- /dev/null
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+/*
+ * Xen para-virtual DRM device
+ *
+ * Copyright (C) 2016-2018 EPAM Systems Inc.
+ *
+ * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ */
+
+#ifndef __XEN_DRM_FRONT_KMS_H_
+#define __XEN_DRM_FRONT_KMS_H_
+
+#include <linux/types.h>
+
+struct xen_drm_front_drm_info;
+struct xen_drm_front_drm_pipeline;
+
+int xen_drm_front_kms_init(struct xen_drm_front_drm_info *drm_info);
+
+void xen_drm_front_kms_fini(struct xen_drm_front_drm_info *drm_info);
+
+void xen_drm_front_kms_on_frame_done(struct xen_drm_front_drm_pipeline *pipeline,
+ u64 fb_cookie);
+
+#endif /* __XEN_DRM_FRONT_KMS_H_ */