author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4
tree      848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/dma-buf
parent    Initial commit.
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dma-buf')
-rw-r--r--  drivers/dma-buf/Kconfig                |   94
-rw-r--r--  drivers/dma-buf/Makefile               |   18
-rw-r--r--  drivers/dma-buf/dma-buf-sysfs-stats.c  |  202
-rw-r--r--  drivers/dma-buf/dma-buf-sysfs-stats.h  |   35
-rw-r--r--  drivers/dma-buf/dma-buf.c              | 1639
-rw-r--r--  drivers/dma-buf/dma-fence-array.c      |  253
-rw-r--r--  drivers/dma-buf/dma-fence-chain.c      |  265
-rw-r--r--  drivers/dma-buf/dma-fence-unwrap.c     |  176
-rw-r--r--  drivers/dma-buf/dma-fence.c            |  965
-rw-r--r--  drivers/dma-buf/dma-heap.c             |  326
-rw-r--r--  drivers/dma-buf/dma-resv.c             |  779
-rw-r--r--  drivers/dma-buf/heaps/Kconfig          |   14
-rw-r--r--  drivers/dma-buf/heaps/Makefile         |    3
-rw-r--r--  drivers/dma-buf/heaps/cma_heap.c       |  407
-rw-r--r--  drivers/dma-buf/heaps/system_heap.c    |  440
-rw-r--r--  drivers/dma-buf/selftest.c             |  167
-rw-r--r--  drivers/dma-buf/selftest.h             |   30
-rw-r--r--  drivers/dma-buf/selftests.h            |   16
-rw-r--r--  drivers/dma-buf/st-dma-fence-chain.c   |  710
-rw-r--r--  drivers/dma-buf/st-dma-fence-unwrap.c  |  382
-rw-r--r--  drivers/dma-buf/st-dma-fence.c         |  593
-rw-r--r--  drivers/dma-buf/st-dma-resv.c          |  316
-rw-r--r--  drivers/dma-buf/sw_sync.c              |  412
-rw-r--r--  drivers/dma-buf/sync_debug.c           |  190
-rw-r--r--  drivers/dma-buf/sync_debug.h           |   72
-rw-r--r--  drivers/dma-buf/sync_file.c            |  372
-rw-r--r--  drivers/dma-buf/sync_trace.h           |   33
-rw-r--r--  drivers/dma-buf/udmabuf.c              |  402
28 files changed, 9311 insertions(+), 0 deletions(-)
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
new file mode 100644
index 000000000..e4dc53a36
--- /dev/null
+++ b/drivers/dma-buf/Kconfig
@@ -0,0 +1,94 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "DMABUF options"
+
+config SYNC_FILE
+ bool "Explicit Synchronization Framework"
+ default n
+ select DMA_SHARED_BUFFER
+ help
+ The Sync File Framework adds explicit synchronization via
+ userspace. It enables sending and receiving 'struct dma_fence'
+ objects to/from userspace via Sync File fds, for synchronization
+ between drivers through userspace components. It has been ported from Android.
+
+ The first and main user of this is graphics, in which a fence is
+ associated with a buffer. When a job is submitted to the GPU, a fence
+ is attached to the buffer and is transferred via userspace, using Sync
+ File fds, to the DRM driver for example. More details at
+ Documentation/driver-api/sync_file.rst.
+
+config SW_SYNC
+ bool "Sync File Validation Framework"
+ default n
+ depends on SYNC_FILE
+ depends on DEBUG_FS
+ help
+ A sync object driver that uses a 32-bit counter to coordinate
+ synchronization. Useful when there is no hardware primitive backing
+ the synchronization.
+
+ WARNING: improper use of this can result in deadlocking kernel
+ drivers from userspace. Intended for test and debug only.
+
+config UDMABUF
+ bool "userspace dmabuf misc driver"
+ default n
+ depends on DMA_SHARED_BUFFER
+ depends on MEMFD_CREATE || COMPILE_TEST
+ help
+ A driver to let userspace turn memfd regions into dma-bufs.
+ Qemu can use this to create host dmabufs for guest framebuffers.
+
+config DMABUF_MOVE_NOTIFY
+ bool "Move notify between drivers (EXPERIMENTAL)"
+ default n
+ depends on DMA_SHARED_BUFFER
+ help
+ Don't pin buffers if the dynamic DMA-buf interface is available on
+ both the exporter and the importer. This fixes a security
+ problem where userspace is able to pin unrestricted amounts of memory
+ through DMA-buf.
+ This is marked experimental because we don't yet have a consistent
+ execution context and memory management between drivers.
+
+config DMABUF_DEBUG
+ bool "DMA-BUF debug checks"
+ depends on DMA_SHARED_BUFFER
+ default y if DMA_API_DEBUG
+ help
+ This option enables additional checks for DMA-BUF importers and
+ exporters. Specifically it validates that importers do not peek at the
+ underlying struct page when they import a buffer.
+
+config DMABUF_SELFTESTS
+ tristate "Selftests for the dma-buf interfaces"
+ default n
+ depends on DMA_SHARED_BUFFER
+
+menuconfig DMABUF_HEAPS
+ bool "DMA-BUF Userland Memory Heaps"
+ select DMA_SHARED_BUFFER
+ help
+ Choose this option to enable the DMA-BUF userland memory heaps.
+ This option creates per-heap chardevs in /dev/dma_heap/ which
+ allow userspace to allocate dma-bufs that can be shared
+ between drivers.
+
+menuconfig DMABUF_SYSFS_STATS
+ bool "DMA-BUF sysfs statistics (DEPRECATED)"
+ depends on DMA_SHARED_BUFFER
+ help
+ Choose this option to enable DMA-BUF sysfs statistics
+ under /sys/kernel/dmabuf/buffers.
+
+ /sys/kernel/dmabuf/buffers/<inode_number> will contain
+ statistics for the DMA-BUF with the unique inode number
+ <inode_number>.
+
+ This option is deprecated and will sooner or later be removed.
+ Android is the only user of this, and it has turned out to cause
+ quite some performance problems.
+
+source "drivers/dma-buf/heaps/Kconfig"
+
+endmenu
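For illustration only (not part of the patch): a minimal userspace sketch of the CONFIG_UDMABUF use case described above, turning a sealed memfd region into a dma-buf via /dev/udmabuf. It assumes the UDMABUF_CREATE ioctl and struct udmabuf_create from <linux/udmabuf.h>; error handling is mostly omitted.

/* Illustrative userspace sketch, not part of the patch. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

int main(void)
{
	int size = 4 * 4096;
	/* udmabuf requires the memfd to be sealed against shrinking */
	int memfd = memfd_create("guest-fb", MFD_ALLOW_SEALING);

	ftruncate(memfd, size);
	fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);

	struct udmabuf_create create = {
		.memfd  = memfd,
		.offset = 0,
		.size   = size,
	};
	int devfd = open("/dev/udmabuf", O_RDWR);
	int buffd = ioctl(devfd, UDMABUF_CREATE, &create);

	if (buffd < 0)
		perror("UDMABUF_CREATE");
	else
		printf("dma-buf fd: %d\n", buffd);
	return 0;
}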
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
new file mode 100644
index 000000000..70ec901ed
--- /dev/null
+++ b/drivers/dma-buf/Makefile
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-y := dma-buf.o dma-fence.o dma-fence-array.o dma-fence-chain.o \
+ dma-fence-unwrap.o dma-resv.o
+obj-$(CONFIG_DMABUF_HEAPS) += dma-heap.o
+obj-$(CONFIG_DMABUF_HEAPS) += heaps/
+obj-$(CONFIG_SYNC_FILE) += sync_file.o
+obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
+obj-$(CONFIG_UDMABUF) += udmabuf.o
+obj-$(CONFIG_DMABUF_SYSFS_STATS) += dma-buf-sysfs-stats.o
+
+dmabuf_selftests-y := \
+ selftest.o \
+ st-dma-fence.o \
+ st-dma-fence-chain.o \
+ st-dma-fence-unwrap.o \
+ st-dma-resv.o
+
+obj-$(CONFIG_DMABUF_SELFTESTS) += dmabuf_selftests.o
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.c b/drivers/dma-buf/dma-buf-sysfs-stats.c
new file mode 100644
index 000000000..4b680e10c
--- /dev/null
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * DMA-BUF sysfs statistics.
+ *
+ * Copyright (C) 2021 Google LLC.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-resv.h>
+#include <linux/kobject.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "dma-buf-sysfs-stats.h"
+
+#define to_dma_buf_entry_from_kobj(x) container_of(x, struct dma_buf_sysfs_entry, kobj)
+
+/**
+ * DOC: overview
+ *
+ * ``/sys/kernel/debug/dma_buf/bufinfo`` provides an overview of every DMA-BUF
+ * in the system. However, since debugfs is not safe to be mounted in
+ * production, procfs and sysfs can be used to gather DMA-BUF statistics on
+ * production systems.
+ *
+ * The ``/proc/<pid>/fdinfo/<fd>`` files in procfs can be used to gather
+ * information about DMA-BUF fds. Detailed documentation about the interface
+ * is present in Documentation/filesystems/proc.rst.
+ *
+ * Unfortunately, the existing procfs interfaces can only provide information
+ * about the DMA-BUFs for which processes hold fds or have the buffers mmapped
+ * into their address space. This necessitated the creation of the DMA-BUF sysfs
+ * statistics interface to provide per-buffer information on production systems.
+ *
+ * The interface at ``/sys/kernel/dmabuf/buffers`` exposes information about
+ * every DMA-BUF when ``CONFIG_DMABUF_SYSFS_STATS`` is enabled.
+ *
+ * The following stats are exposed by the interface:
+ *
+ * * ``/sys/kernel/dmabuf/buffers/<inode_number>/exporter_name``
+ * * ``/sys/kernel/dmabuf/buffers/<inode_number>/size``
+ *
+ * The information in the interface can also be used to derive per-exporter
+ * statistics. The data from the interface can be gathered on error conditions
+ * or other important events to provide a snapshot of DMA-BUF usage.
+ * It can also be collected periodically by telemetry to monitor various metrics.
+ *
+ * Detailed documentation about the interface is present in
+ * Documentation/ABI/testing/sysfs-kernel-dmabuf-buffers.
+ */
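For illustration only (not part of the patch): a small userspace sketch of the telemetry use case described in the overview above, walking /sys/kernel/dmabuf/buffers and printing each buffer's exporter_name and size. The paths are the ones documented above; error handling is minimal.

/* Illustrative userspace sketch, not part of the patch. */
#include <dirent.h>
#include <stdio.h>

static void print_stat(const char *ino, const char *stat)
{
	char path[256], val[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/kernel/dmabuf/buffers/%s/%s", ino, stat);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(val, sizeof(val), f))
		printf("  %s: %s", stat, val);
	fclose(f);
}

int main(void)
{
	DIR *d = opendir("/sys/kernel/dmabuf/buffers");
	struct dirent *de;

	if (!d)
		return 1;
	while ((de = readdir(d))) {
		if (de->d_name[0] == '.')
			continue;
		printf("dma-buf inode %s\n", de->d_name);
		print_stat(de->d_name, "exporter_name");
		print_stat(de->d_name, "size");
	}
	closedir(d);
	return 0;
}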
+
+struct dma_buf_stats_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct dma_buf *dmabuf,
+ struct dma_buf_stats_attribute *attr, char *buf);
+};
+#define to_dma_buf_stats_attr(x) container_of(x, struct dma_buf_stats_attribute, attr)
+
+static ssize_t dma_buf_stats_attribute_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct dma_buf_stats_attribute *attribute;
+ struct dma_buf_sysfs_entry *sysfs_entry;
+ struct dma_buf *dmabuf;
+
+ attribute = to_dma_buf_stats_attr(attr);
+ sysfs_entry = to_dma_buf_entry_from_kobj(kobj);
+ dmabuf = sysfs_entry->dmabuf;
+
+ if (!dmabuf || !attribute->show)
+ return -EIO;
+
+ return attribute->show(dmabuf, attribute, buf);
+}
+
+static const struct sysfs_ops dma_buf_stats_sysfs_ops = {
+ .show = dma_buf_stats_attribute_show,
+};
+
+static ssize_t exporter_name_show(struct dma_buf *dmabuf,
+ struct dma_buf_stats_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%s\n", dmabuf->exp_name);
+}
+
+static ssize_t size_show(struct dma_buf *dmabuf,
+ struct dma_buf_stats_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "%zu\n", dmabuf->size);
+}
+
+static struct dma_buf_stats_attribute exporter_name_attribute =
+ __ATTR_RO(exporter_name);
+static struct dma_buf_stats_attribute size_attribute = __ATTR_RO(size);
+
+static struct attribute *dma_buf_stats_default_attrs[] = {
+ &exporter_name_attribute.attr,
+ &size_attribute.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(dma_buf_stats_default);
+
+static void dma_buf_sysfs_release(struct kobject *kobj)
+{
+ struct dma_buf_sysfs_entry *sysfs_entry;
+
+ sysfs_entry = to_dma_buf_entry_from_kobj(kobj);
+ kfree(sysfs_entry);
+}
+
+static struct kobj_type dma_buf_ktype = {
+ .sysfs_ops = &dma_buf_stats_sysfs_ops,
+ .release = dma_buf_sysfs_release,
+ .default_groups = dma_buf_stats_default_groups,
+};
+
+void dma_buf_stats_teardown(struct dma_buf *dmabuf)
+{
+ struct dma_buf_sysfs_entry *sysfs_entry;
+
+ sysfs_entry = dmabuf->sysfs_entry;
+ if (!sysfs_entry)
+ return;
+
+ kobject_del(&sysfs_entry->kobj);
+ kobject_put(&sysfs_entry->kobj);
+}
+
+
+/* Statistics files do not need to send uevents. */
+static int dmabuf_sysfs_uevent_filter(struct kobject *kobj)
+{
+ return 0;
+}
+
+static const struct kset_uevent_ops dmabuf_sysfs_no_uevent_ops = {
+ .filter = dmabuf_sysfs_uevent_filter,
+};
+
+static struct kset *dma_buf_stats_kset;
+static struct kset *dma_buf_per_buffer_stats_kset;
+int dma_buf_init_sysfs_statistics(void)
+{
+ dma_buf_stats_kset = kset_create_and_add("dmabuf",
+ &dmabuf_sysfs_no_uevent_ops,
+ kernel_kobj);
+ if (!dma_buf_stats_kset)
+ return -ENOMEM;
+
+ dma_buf_per_buffer_stats_kset = kset_create_and_add("buffers",
+ &dmabuf_sysfs_no_uevent_ops,
+ &dma_buf_stats_kset->kobj);
+ if (!dma_buf_per_buffer_stats_kset) {
+ kset_unregister(dma_buf_stats_kset);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+void dma_buf_uninit_sysfs_statistics(void)
+{
+ kset_unregister(dma_buf_per_buffer_stats_kset);
+ kset_unregister(dma_buf_stats_kset);
+}
+
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
+{
+ struct dma_buf_sysfs_entry *sysfs_entry;
+ int ret;
+
+ if (!dmabuf->exp_name) {
+ pr_err("exporter name must not be empty if stats needed\n");
+ return -EINVAL;
+ }
+
+ sysfs_entry = kzalloc(sizeof(struct dma_buf_sysfs_entry), GFP_KERNEL);
+ if (!sysfs_entry)
+ return -ENOMEM;
+
+ sysfs_entry->kobj.kset = dma_buf_per_buffer_stats_kset;
+ sysfs_entry->dmabuf = dmabuf;
+
+ dmabuf->sysfs_entry = sysfs_entry;
+
+ /* create the directory for buffer stats */
+ ret = kobject_init_and_add(&sysfs_entry->kobj, &dma_buf_ktype, NULL,
+ "%lu", file_inode(file)->i_ino);
+ if (ret)
+ goto err_sysfs_dmabuf;
+
+ return 0;
+
+err_sysfs_dmabuf:
+ kobject_put(&sysfs_entry->kobj);
+ dmabuf->sysfs_entry = NULL;
+ return ret;
+}
diff --git a/drivers/dma-buf/dma-buf-sysfs-stats.h b/drivers/dma-buf/dma-buf-sysfs-stats.h
new file mode 100644
index 000000000..7a8a995b7
--- /dev/null
+++ b/drivers/dma-buf/dma-buf-sysfs-stats.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * DMA-BUF sysfs statistics.
+ *
+ * Copyright (C) 2021 Google LLC.
+ */
+
+#ifndef _DMA_BUF_SYSFS_STATS_H
+#define _DMA_BUF_SYSFS_STATS_H
+
+#ifdef CONFIG_DMABUF_SYSFS_STATS
+
+int dma_buf_init_sysfs_statistics(void);
+void dma_buf_uninit_sysfs_statistics(void);
+
+int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file);
+
+void dma_buf_stats_teardown(struct dma_buf *dmabuf);
+#else
+
+static inline int dma_buf_init_sysfs_statistics(void)
+{
+ return 0;
+}
+
+static inline void dma_buf_uninit_sysfs_statistics(void) {}
+
+static inline int dma_buf_stats_setup(struct dma_buf *dmabuf, struct file *file)
+{
+ return 0;
+}
+
+static inline void dma_buf_stats_teardown(struct dma_buf *dmabuf) {}
+#endif
+#endif // _DMA_BUF_SYSFS_STATS_H
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
new file mode 100644
index 000000000..3d58514d0
--- /dev/null
+++ b/drivers/dma-buf/dma-buf.c
@@ -0,0 +1,1639 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Framework for buffer objects that can be shared across devices/subsystems.
+ *
+ * Copyright(C) 2011 Linaro Limited. All rights reserved.
+ * Author: Sumit Semwal <sumit.semwal@ti.com>
+ *
+ * Many thanks to the linaro-mm-sig list, and especially
+ * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
+ * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
+ * refining of this idea.
+ */
+
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-unwrap.h>
+#include <linux/anon_inodes.h>
+#include <linux/export.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/sync_file.h>
+#include <linux/poll.h>
+#include <linux/dma-resv.h>
+#include <linux/mm.h>
+#include <linux/mount.h>
+#include <linux/pseudo_fs.h>
+
+#include <uapi/linux/dma-buf.h>
+#include <uapi/linux/magic.h>
+
+#include "dma-buf-sysfs-stats.h"
+
+static inline int is_dma_buf_file(struct file *);
+
+struct dma_buf_list {
+ struct list_head head;
+ struct mutex lock;
+};
+
+static struct dma_buf_list db_list;
+
+static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
+{
+ struct dma_buf *dmabuf;
+ char name[DMA_BUF_NAME_LEN];
+ size_t ret = 0;
+
+ dmabuf = dentry->d_fsdata;
+ spin_lock(&dmabuf->name_lock);
+ if (dmabuf->name)
+ ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
+ spin_unlock(&dmabuf->name_lock);
+
+ return dynamic_dname(buffer, buflen, "/%s:%s",
+ dentry->d_name.name, ret > 0 ? name : "");
+}
+
+static void dma_buf_release(struct dentry *dentry)
+{
+ struct dma_buf *dmabuf;
+
+ dmabuf = dentry->d_fsdata;
+ if (unlikely(!dmabuf))
+ return;
+
+ BUG_ON(dmabuf->vmapping_counter);
+
+ /*
+ * If you hit this BUG() it could mean:
+ * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
+ * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
+ */
+ BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);
+
+ dma_buf_stats_teardown(dmabuf);
+ dmabuf->ops->release(dmabuf);
+
+ if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
+ dma_resv_fini(dmabuf->resv);
+
+ WARN_ON(!list_empty(&dmabuf->attachments));
+ module_put(dmabuf->owner);
+ kfree(dmabuf->name);
+ kfree(dmabuf);
+}
+
+static int dma_buf_file_release(struct inode *inode, struct file *file)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+ if (dmabuf) {
+ mutex_lock(&db_list.lock);
+ list_del(&dmabuf->list_node);
+ mutex_unlock(&db_list.lock);
+ }
+
+ return 0;
+}
+
+static const struct dentry_operations dma_buf_dentry_ops = {
+ .d_dname = dmabuffs_dname,
+ .d_release = dma_buf_release,
+};
+
+static struct vfsmount *dma_buf_mnt;
+
+static int dma_buf_fs_init_context(struct fs_context *fc)
+{
+ struct pseudo_fs_context *ctx;
+
+ ctx = init_pseudo(fc, DMA_BUF_MAGIC);
+ if (!ctx)
+ return -ENOMEM;
+ ctx->dops = &dma_buf_dentry_ops;
+ return 0;
+}
+
+static struct file_system_type dma_buf_fs_type = {
+ .name = "dmabuf",
+ .init_fs_context = dma_buf_fs_init_context,
+ .kill_sb = kill_anon_super,
+};
+
+static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
+{
+ struct dma_buf *dmabuf;
+
+ if (!is_dma_buf_file(file))
+ return -EINVAL;
+
+ dmabuf = file->private_data;
+
+ /* check if buffer supports mmap */
+ if (!dmabuf->ops->mmap)
+ return -EINVAL;
+
+ /* check for overflowing the buffer's size */
+ if (vma->vm_pgoff + vma_pages(vma) >
+ dmabuf->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ return dmabuf->ops->mmap(dmabuf, vma);
+}
+
+static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
+{
+ struct dma_buf *dmabuf;
+ loff_t base;
+
+ if (!is_dma_buf_file(file))
+ return -EBADF;
+
+ dmabuf = file->private_data;
+
+ /* only support discovering the end of the buffer,
+ but also allow SEEK_SET to maintain the idiomatic
+ SEEK_END(0), SEEK_CUR(0) pattern */
+ if (whence == SEEK_END)
+ base = dmabuf->size;
+ else if (whence == SEEK_SET)
+ base = 0;
+ else
+ return -EINVAL;
+
+ if (offset != 0)
+ return -EINVAL;
+
+ return base + offset;
+}
+
+/**
+ * DOC: implicit fence polling
+ *
+ * To support cross-device and cross-driver synchronization of buffer access
+ * implicit fences (represented internally in the kernel with &struct dma_fence)
+ * can be attached to a &dma_buf. The glue for that and a few related things are
+ * provided in the &dma_resv structure.
+ *
+ * Userspace can query the state of these implicitly tracked fences using poll()
+ * and related system calls:
+ *
+ * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
+ * most recent write or exclusive fence.
+ *
+ * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
+ * all attached fences, shared and exclusive ones.
+ *
+ * Note that this only signals the completion of the respective fences, i.e. the
+ * DMA transfers are complete. Cache flushing and any other necessary
+ * preparations before CPU access can begin still need to happen.
+ *
+ * As an alternative to poll(), the set of fences on a DMA buffer can be
+ * exported as a &sync_file using dma_buf_export_sync_file().
+ */
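For illustration only (not part of the patch): a userspace sketch of the implicit-fence polling described above. It waits for read access, i.e. for the most recent write fences to signal, via poll(); dmabuf_fd is assumed to have been obtained elsewhere.

/* Illustrative userspace sketch, not part of the patch; dmabuf_fd is assumed given. */
#include <poll.h>
#include <stdio.h>

static int wait_for_read_access(int dmabuf_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd     = dmabuf_fd,
		.events = POLLIN,	/* wait for the most recent write fences */
	};
	int ret = poll(&pfd, 1, timeout_ms);

	if (ret < 0)
		perror("poll");
	else if (ret == 0)
		fprintf(stderr, "fences not signaled within %d ms\n", timeout_ms);
	return ret > 0 ? 0 : -1;
}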
+
+static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
+ struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
+ unsigned long flags;
+
+ spin_lock_irqsave(&dcb->poll->lock, flags);
+ wake_up_locked_poll(dcb->poll, dcb->active);
+ dcb->active = 0;
+ spin_unlock_irqrestore(&dcb->poll->lock, flags);
+ dma_fence_put(fence);
+ /* Paired with get_file in dma_buf_poll */
+ fput(dmabuf->file);
+}
+
+static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
+ struct dma_buf_poll_cb_t *dcb)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+ int r;
+
+ dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
+ fence) {
+ dma_fence_get(fence);
+ r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
+ if (!r)
+ return true;
+ dma_fence_put(fence);
+ }
+
+ return false;
+}
+
+static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
+{
+ struct dma_buf *dmabuf;
+ struct dma_resv *resv;
+ __poll_t events;
+
+ dmabuf = file->private_data;
+ if (!dmabuf || !dmabuf->resv)
+ return EPOLLERR;
+
+ resv = dmabuf->resv;
+
+ poll_wait(file, &dmabuf->poll, poll);
+
+ events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
+ if (!events)
+ return 0;
+
+ dma_resv_lock(resv, NULL);
+
+ if (events & EPOLLOUT) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;
+
+ /* Check that callback isn't busy */
+ spin_lock_irq(&dmabuf->poll.lock);
+ if (dcb->active)
+ events &= ~EPOLLOUT;
+ else
+ dcb->active = EPOLLOUT;
+ spin_unlock_irq(&dmabuf->poll.lock);
+
+ if (events & EPOLLOUT) {
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
+
+ if (!dma_buf_poll_add_cb(resv, true, dcb))
+ /* No callback queued, wake up any other waiters */
+ dma_buf_poll_cb(NULL, &dcb->cb);
+ else
+ events &= ~EPOLLOUT;
+ }
+ }
+
+ if (events & EPOLLIN) {
+ struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;
+
+ /* Check that callback isn't busy */
+ spin_lock_irq(&dmabuf->poll.lock);
+ if (dcb->active)
+ events &= ~EPOLLIN;
+ else
+ dcb->active = EPOLLIN;
+ spin_unlock_irq(&dmabuf->poll.lock);
+
+ if (events & EPOLLIN) {
+ /* Paired with fput in dma_buf_poll_cb */
+ get_file(dmabuf->file);
+
+ if (!dma_buf_poll_add_cb(resv, false, dcb))
+ /* No callback queued, wake up any other waiters */
+ dma_buf_poll_cb(NULL, &dcb->cb);
+ else
+ events &= ~EPOLLIN;
+ }
+ }
+
+ dma_resv_unlock(resv);
+ return events;
+}
+
+/**
+ * dma_buf_set_name - Set a name on a specific dma_buf to track usage.
+ * The name of a dma-buf can be changed if the same piece of memory is
+ * used for multiple purposes by different devices.
+ *
+ * @dmabuf: [in] dmabuf buffer that will be renamed.
+ * @buf: [in] A piece of userspace memory that contains the name of
+ * the dma-buf.
+ *
+ * Returns 0 on success, or a negative error code if copying the name
+ * from userspace fails.
+ *
+ */
+static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
+{
+ char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
+
+ if (IS_ERR(name))
+ return PTR_ERR(name);
+
+ spin_lock(&dmabuf->name_lock);
+ kfree(dmabuf->name);
+ dmabuf->name = name;
+ spin_unlock(&dmabuf->name_lock);
+
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_SYNC_FILE)
+static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
+ void __user *user_data)
+{
+ struct dma_buf_export_sync_file arg;
+ enum dma_resv_usage usage;
+ struct dma_fence *fence = NULL;
+ struct sync_file *sync_file;
+ int fd, ret;
+
+ if (copy_from_user(&arg, user_data, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags & ~DMA_BUF_SYNC_RW)
+ return -EINVAL;
+
+ if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
+ return -EINVAL;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
+ ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
+ if (ret)
+ goto err_put_fd;
+
+ if (!fence)
+ fence = dma_fence_get_stub();
+
+ sync_file = sync_file_create(fence);
+
+ dma_fence_put(fence);
+
+ if (!sync_file) {
+ ret = -ENOMEM;
+ goto err_put_fd;
+ }
+
+ arg.fd = fd;
+ if (copy_to_user(user_data, &arg, sizeof(arg))) {
+ ret = -EFAULT;
+ goto err_put_file;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return 0;
+
+err_put_file:
+ fput(sync_file->file);
+err_put_fd:
+ put_unused_fd(fd);
+ return ret;
+}
+
+static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
+ const void __user *user_data)
+{
+ struct dma_buf_import_sync_file arg;
+ struct dma_fence *fence, *f;
+ enum dma_resv_usage usage;
+ struct dma_fence_unwrap iter;
+ unsigned int num_fences;
+ int ret = 0;
+
+ if (copy_from_user(&arg, user_data, sizeof(arg)))
+ return -EFAULT;
+
+ if (arg.flags & ~DMA_BUF_SYNC_RW)
+ return -EINVAL;
+
+ if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
+ return -EINVAL;
+
+ fence = sync_file_get_fence(arg.fd);
+ if (!fence)
+ return -EINVAL;
+
+ usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
+ DMA_RESV_USAGE_READ;
+
+ num_fences = 0;
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ ++num_fences;
+
+ if (num_fences > 0) {
+ dma_resv_lock(dmabuf->resv, NULL);
+
+ ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
+ if (!ret) {
+ dma_fence_unwrap_for_each(f, &iter, fence)
+ dma_resv_add_fence(dmabuf->resv, f, usage);
+ }
+
+ dma_resv_unlock(dmabuf->resv);
+ }
+
+ dma_fence_put(fence);
+
+ return ret;
+}
+#endif
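For illustration only (not part of the patch): a userspace sketch of the two sync-file ioctls implemented above. Struct and flag names are the uapi ones used by the code (<linux/dma-buf.h>); error handling is minimal.

/* Illustrative userspace sketch, not part of the patch. */
#include <sys/ioctl.h>
#include <linux/dma-buf.h>

/* Get a sync_file fd that signals once all writers of dmabuf_fd are done. */
static int export_read_fence(int dmabuf_fd)
{
	struct dma_buf_export_sync_file arg = { .flags = DMA_BUF_SYNC_READ };

	if (ioctl(dmabuf_fd, DMA_BUF_IOCTL_EXPORT_SYNC_FILE, &arg))
		return -1;
	return arg.fd;
}

/* Attach a sync_file as a write fence, so later readers wait for it. */
static int import_write_fence(int dmabuf_fd, int sync_file_fd)
{
	struct dma_buf_import_sync_file arg = {
		.flags = DMA_BUF_SYNC_WRITE,
		.fd    = sync_file_fd,
	};

	return ioctl(dmabuf_fd, DMA_BUF_IOCTL_IMPORT_SYNC_FILE, &arg);
}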
+
+static long dma_buf_ioctl(struct file *file,
+ unsigned int cmd, unsigned long arg)
+{
+ struct dma_buf *dmabuf;
+ struct dma_buf_sync sync;
+ enum dma_data_direction direction;
+ int ret;
+
+ dmabuf = file->private_data;
+
+ switch (cmd) {
+ case DMA_BUF_IOCTL_SYNC:
+ if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
+ return -EFAULT;
+
+ if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
+ return -EINVAL;
+
+ switch (sync.flags & DMA_BUF_SYNC_RW) {
+ case DMA_BUF_SYNC_READ:
+ direction = DMA_FROM_DEVICE;
+ break;
+ case DMA_BUF_SYNC_WRITE:
+ direction = DMA_TO_DEVICE;
+ break;
+ case DMA_BUF_SYNC_RW:
+ direction = DMA_BIDIRECTIONAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (sync.flags & DMA_BUF_SYNC_END)
+ ret = dma_buf_end_cpu_access(dmabuf, direction);
+ else
+ ret = dma_buf_begin_cpu_access(dmabuf, direction);
+
+ return ret;
+
+ case DMA_BUF_SET_NAME_A:
+ case DMA_BUF_SET_NAME_B:
+ return dma_buf_set_name(dmabuf, (const char __user *)arg);
+
+#if IS_ENABLED(CONFIG_SYNC_FILE)
+ case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
+ return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
+ case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
+ return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
+#endif
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
+{
+ struct dma_buf *dmabuf = file->private_data;
+
+ seq_printf(m, "size:\t%zu\n", dmabuf->size);
+ /* Don't count the temporary reference taken inside procfs seq_show */
+ seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
+ seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
+ spin_lock(&dmabuf->name_lock);
+ if (dmabuf->name)
+ seq_printf(m, "name:\t%s\n", dmabuf->name);
+ spin_unlock(&dmabuf->name_lock);
+}
+
+static const struct file_operations dma_buf_fops = {
+ .release = dma_buf_file_release,
+ .mmap = dma_buf_mmap_internal,
+ .llseek = dma_buf_llseek,
+ .poll = dma_buf_poll,
+ .unlocked_ioctl = dma_buf_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+ .show_fdinfo = dma_buf_show_fdinfo,
+};
+
+/*
+ * is_dma_buf_file - Check if struct file* is associated with dma_buf
+ */
+static inline int is_dma_buf_file(struct file *file)
+{
+ return file->f_op == &dma_buf_fops;
+}
+
+static struct file *dma_buf_getfile(size_t size, int flags)
+{
+ static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
+ struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
+ struct file *file;
+
+ if (IS_ERR(inode))
+ return ERR_CAST(inode);
+
+ inode->i_size = size;
+ inode_set_bytes(inode, size);
+
+ /*
+ * The ->i_ino acquired from get_next_ino() is not unique and thus
+ * not suitable for use as the dentry name by dmabuf stats.
+ * Override ->i_ino with a unique, dmabuffs-specific
+ * value.
+ */
+ inode->i_ino = atomic64_add_return(1, &dmabuf_inode);
+ flags &= O_ACCMODE | O_NONBLOCK;
+ file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
+ flags, &dma_buf_fops);
+ if (IS_ERR(file))
+ goto err_alloc_file;
+
+ return file;
+
+err_alloc_file:
+ iput(inode);
+ return file;
+}
+
+/**
+ * DOC: dma buf device access
+ *
+ * For device DMA access to a shared DMA buffer the usual sequence of operations
+ * is fairly simple:
+ *
+ * 1. The exporter defines its exporter instance using
+ * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
+ * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
+ * as a file descriptor by calling dma_buf_fd().
+ *
+ * 2. Userspace passes this file descriptor to all drivers it wants to share
+ * this buffer with: First the file descriptor is converted to a &dma_buf using
+ * dma_buf_get(). Then the buffer is attached to the device using
+ * dma_buf_attach().
+ *
+ * Up to this stage the exporter is still free to migrate or reallocate the
+ * backing storage.
+ *
+ * 3. Once the buffer is attached to all devices userspace can initiate DMA
+ * access to the shared buffer. In the kernel this is done by calling
+ * dma_buf_map_attachment() and dma_buf_unmap_attachment().
+ *
+ * 4. Once a driver is done with a shared buffer it needs to call
+ * dma_buf_detach() (after cleaning up any mappings) and then release the
+ * reference acquired with dma_buf_get() by calling dma_buf_put().
+ *
+ * For the detailed semantics exporters are expected to implement see
+ * &dma_buf_ops.
+ */
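For illustration only (not part of the patch): a kernel-side sketch of importer steps 2-4 above for a hypothetical driver, using only the dma-buf calls defined in this file (assumes <linux/dma-buf.h>).

/* Illustrative sketch, not part of the patch; hypothetical importer. */
static int example_import(struct device *dev, int fd)
{
	struct dma_buf *dmabuf;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	int ret = 0;

	dmabuf = dma_buf_get(fd);		/* step 2: fd -> dma_buf */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	attach = dma_buf_attach(dmabuf, dev);	/* step 2: attach the device */
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_put;
	}

	/* step 3: map for DMA (the _unlocked variant takes the resv lock itself) */
	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		goto err_detach;
	}

	/* ... program the device with the addresses in sgt ... */

	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
err_detach:
	dma_buf_detach(dmabuf, attach);		/* step 4: detach ... */
err_put:
	dma_buf_put(dmabuf);			/* ... and drop the reference */
	return ret;
}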
+
+/**
+ * dma_buf_export - Creates a new dma_buf, and associates an anon file
+ * with this buffer, so it can be exported.
+ * Also connect the allocator-specific data and ops to the buffer.
+ * Additionally, provide a name string for the exporter; useful in debugging.
+ *
+ * @exp_info: [in] holds all the export related information provided
+ * by the exporter. see &struct dma_buf_export_info
+ * for further details.
+ *
+ * Returns, on success, a newly created struct dma_buf object, which wraps the
+ * supplied private data and operations for struct dma_buf_ops. On either
+ * missing ops or an error in allocating struct dma_buf, an ERR_PTR() encoding
+ * the negative error is returned.
+ *
+ * For most cases the easiest way to create @exp_info is through the
+ * %DEFINE_DMA_BUF_EXPORT_INFO macro.
+ */
+struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
+{
+ struct dma_buf *dmabuf;
+ struct dma_resv *resv = exp_info->resv;
+ struct file *file;
+ size_t alloc_size = sizeof(struct dma_buf);
+ int ret;
+
+ if (WARN_ON(!exp_info->priv || !exp_info->ops
+ || !exp_info->ops->map_dma_buf
+ || !exp_info->ops->unmap_dma_buf
+ || !exp_info->ops->release))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
+ (exp_info->ops->pin || exp_info->ops->unpin)))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
+ return ERR_PTR(-EINVAL);
+
+ if (!try_module_get(exp_info->owner))
+ return ERR_PTR(-ENOENT);
+
+ file = dma_buf_getfile(exp_info->size, exp_info->flags);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_module;
+ }
+
+ if (!exp_info->resv)
+ alloc_size += sizeof(struct dma_resv);
+ else
+ /* prevent &dma_buf[1] == dma_buf->resv */
+ alloc_size += 1;
+ dmabuf = kzalloc(alloc_size, GFP_KERNEL);
+ if (!dmabuf) {
+ ret = -ENOMEM;
+ goto err_file;
+ }
+
+ dmabuf->priv = exp_info->priv;
+ dmabuf->ops = exp_info->ops;
+ dmabuf->size = exp_info->size;
+ dmabuf->exp_name = exp_info->exp_name;
+ dmabuf->owner = exp_info->owner;
+ spin_lock_init(&dmabuf->name_lock);
+ init_waitqueue_head(&dmabuf->poll);
+ dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
+ dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
+ mutex_init(&dmabuf->lock);
+ INIT_LIST_HEAD(&dmabuf->attachments);
+
+ if (!resv) {
+ dmabuf->resv = (struct dma_resv *)&dmabuf[1];
+ dma_resv_init(dmabuf->resv);
+ } else {
+ dmabuf->resv = resv;
+ }
+
+ ret = dma_buf_stats_setup(dmabuf, file);
+ if (ret)
+ goto err_dmabuf;
+
+ file->private_data = dmabuf;
+ file->f_path.dentry->d_fsdata = dmabuf;
+ dmabuf->file = file;
+
+ mutex_lock(&db_list.lock);
+ list_add(&dmabuf->list_node, &db_list.head);
+ mutex_unlock(&db_list.lock);
+
+ return dmabuf;
+
+err_dmabuf:
+ if (!resv)
+ dma_resv_fini(dmabuf->resv);
+ kfree(dmabuf);
+err_file:
+ fput(file);
+err_module:
+ module_put(exp_info->owner);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_export, DMA_BUF);
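For illustration only (not part of the patch): a sketch of the exporter side described in the kernel-doc above. my_obj and my_dmabuf_ops are hypothetical; only the documented export calls are used.

/* Illustrative sketch, not part of the patch; my_obj and my_dmabuf_ops are hypothetical. */
static int example_export(struct my_obj *obj, int *out_fd)
{
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);	/* fills exp_name and owner */
	struct dma_buf *dmabuf;
	int fd;

	exp_info.ops  = &my_dmabuf_ops;		/* must provide map/unmap/release */
	exp_info.size = obj->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = obj;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0) {
		dma_buf_put(dmabuf);		/* drops the export reference */
		return fd;
	}

	*out_fd = fd;
	return 0;
}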
+
+/**
+ * dma_buf_fd - returns a file descriptor for the given struct dma_buf
+ * @dmabuf: [in] pointer to dma_buf for which fd is required.
+ * @flags: [in] flags to give to fd
+ *
+ * On success, returns an associated 'fd'. Else, returns error.
+ */
+int dma_buf_fd(struct dma_buf *dmabuf, int flags)
+{
+ int fd;
+
+ if (!dmabuf || !dmabuf->file)
+ return -EINVAL;
+
+ fd = get_unused_fd_flags(flags);
+ if (fd < 0)
+ return fd;
+
+ fd_install(fd, dmabuf->file);
+
+ return fd;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_fd, DMA_BUF);
+
+/**
+ * dma_buf_get - returns the struct dma_buf related to an fd
+ * @fd: [in] fd associated with the struct dma_buf to be returned
+ *
+ * On success, returns the struct dma_buf associated with an fd; uses
+ * file's refcounting done by fget to increase refcount. returns ERR_PTR
+ * otherwise.
+ */
+struct dma_buf *dma_buf_get(int fd)
+{
+ struct file *file;
+
+ file = fget(fd);
+
+ if (!file)
+ return ERR_PTR(-EBADF);
+
+ if (!is_dma_buf_file(file)) {
+ fput(file);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return file->private_data;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_get, DMA_BUF);
+
+/**
+ * dma_buf_put - decreases refcount of the buffer
+ * @dmabuf: [in] buffer to reduce refcount of
+ *
+ * Uses file's refcounting done implicitly by fput().
+ *
+ * If, as a result of this call, the refcount becomes 0, the 'release' file
+ * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
+ * in turn, and frees the memory allocated for dmabuf when exported.
+ */
+void dma_buf_put(struct dma_buf *dmabuf)
+{
+ if (WARN_ON(!dmabuf || !dmabuf->file))
+ return;
+
+ fput(dmabuf->file);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_put, DMA_BUF);
+
+static void mangle_sg_table(struct sg_table *sg_table)
+{
+#ifdef CONFIG_DMABUF_DEBUG
+ int i;
+ struct scatterlist *sg;
+
+ /* To catch abuse of the underlying struct page by importers mix
+ * up the bits, but take care to preserve the low SG_ bits to
+ * not corrupt the sgt. The mixing is undone in __unmap_dma_buf
+ * before passing the sgt back to the exporter. */
+ for_each_sgtable_sg(sg_table, sg, i)
+ sg->page_link ^= ~0xffUL;
+#endif
+
+}
+static struct sg_table * __map_dma_buf(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table;
+ signed long ret;
+
+ sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
+ if (IS_ERR_OR_NULL(sg_table))
+ return sg_table;
+
+ if (!dma_buf_attachment_is_dynamic(attach)) {
+ ret = dma_resv_wait_timeout(attach->dmabuf->resv,
+ DMA_RESV_USAGE_KERNEL, true,
+ MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0) {
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table,
+ direction);
+ return ERR_PTR(ret);
+ }
+ }
+
+ mangle_sg_table(sg_table);
+ return sg_table;
+}
+
+/**
+ * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ * @importer_ops: [in] importer operations for the attachment
+ * @importer_priv: [in] importer private pointer for the attachment
+ *
+ * Returns struct dma_buf_attachment pointer for this attachment. Attachments
+ * must be cleaned up by calling dma_buf_detach().
+ *
+ * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
+ * functionality.
+ *
+ * Returns:
+ *
+ * A pointer to newly created &dma_buf_attachment on success, or a negative
+ * error code wrapped into a pointer on failure.
+ *
+ * Note that this can fail if the backing storage of @dmabuf is in a place not
+ * accessible to @dev, and cannot be moved to a more suitable place. This is
+ * indicated with the error code -EBUSY.
+ */
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv)
+{
+ struct dma_buf_attachment *attach;
+ int ret;
+
+ if (WARN_ON(!dmabuf || !dev))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(importer_ops && !importer_ops->move_notify))
+ return ERR_PTR(-EINVAL);
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return ERR_PTR(-ENOMEM);
+
+ attach->dev = dev;
+ attach->dmabuf = dmabuf;
+ if (importer_ops)
+ attach->peer2peer = importer_ops->allow_peer2peer;
+ attach->importer_ops = importer_ops;
+ attach->importer_priv = importer_priv;
+
+ if (dmabuf->ops->attach) {
+ ret = dmabuf->ops->attach(dmabuf, attach);
+ if (ret)
+ goto err_attach;
+ }
+ dma_resv_lock(dmabuf->resv, NULL);
+ list_add(&attach->node, &dmabuf->attachments);
+ dma_resv_unlock(dmabuf->resv);
+
+ /* When either the importer or the exporter can't handle dynamic
+ * mappings we cache the mapping here to avoid issues with the
+ * reservation object lock.
+ */
+ if (dma_buf_attachment_is_dynamic(attach) !=
+ dma_buf_is_dynamic(dmabuf)) {
+ struct sg_table *sgt;
+
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+ ret = dmabuf->ops->pin(attach);
+ if (ret)
+ goto err_unlock;
+ }
+
+ sgt = __map_dma_buf(attach, DMA_BIDIRECTIONAL);
+ if (!sgt)
+ sgt = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sgt)) {
+ ret = PTR_ERR(sgt);
+ goto err_unpin;
+ }
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+ attach->sgt = sgt;
+ attach->dir = DMA_BIDIRECTIONAL;
+ }
+
+ return attach;
+
+err_attach:
+ kfree(attach);
+ return ERR_PTR(ret);
+
+err_unpin:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dmabuf->ops->unpin(attach);
+
+err_unlock:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_unlock(attach->dmabuf->resv);
+
+ dma_buf_detach(dmabuf, attach);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, DMA_BUF);
+
+/**
+ * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
+ * @dmabuf: [in] buffer to attach device to.
+ * @dev: [in] device to be attached.
+ *
+ * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
+ * mapping.
+ */
+struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
+ struct device *dev)
+{
+ return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_attach, DMA_BUF);
+
+static void __unmap_dma_buf(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
+{
+ /* uses XOR, hence this unmangles */
+ mangle_sg_table(sg_table);
+
+ attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+}
+
+/**
+ * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
+ * @dmabuf: [in] buffer to detach from.
+ * @attach: [in] attachment to be detached; is free'd after this call.
+ *
+ * Clean up a device attachment obtained by calling dma_buf_attach().
+ *
+ * Optionally this calls &dma_buf_ops.detach for device-specific detach.
+ */
+void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
+{
+ if (WARN_ON(!dmabuf || !attach))
+ return;
+
+ if (attach->sgt) {
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+
+ __unmap_dma_buf(attach, attach->sgt, attach->dir);
+
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+ dmabuf->ops->unpin(attach);
+ dma_resv_unlock(attach->dmabuf->resv);
+ }
+ }
+
+ dma_resv_lock(dmabuf->resv, NULL);
+ list_del(&attach->node);
+ dma_resv_unlock(dmabuf->resv);
+ if (dmabuf->ops->detach)
+ dmabuf->ops->detach(dmabuf, attach);
+
+ kfree(attach);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_detach, DMA_BUF);
+
+/**
+ * dma_buf_pin - Lock down the DMA-buf
+ * @attach: [in] attachment which should be pinned
+ *
+ * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
+ * call this, and only for limited use cases like scanout and not for temporary
+ * pin operations. It is not permitted to allow userspace to pin arbitrary
+ * amounts of buffers through this interface.
+ *
+ * Buffers must be unpinned by calling dma_buf_unpin().
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+ int ret = 0;
+
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->pin)
+ ret = dmabuf->ops->pin(attach);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_pin, DMA_BUF);
+
+/**
+ * dma_buf_unpin - Unpin a DMA-buf
+ * @attach: [in] attachment which should be unpinned
+ *
+ * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
+ * any mapping of @attach again and inform the importer through
+ * &dma_buf_attach_ops.move_notify.
+ */
+void dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ WARN_ON(!dma_buf_attachment_is_dynamic(attach));
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->unpin)
+ dmabuf->ops->unpin(attach);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, DMA_BUF);
+
+/**
+ * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @direction: [in] direction of DMA transfer
+ *
+ * Returns sg_table containing the scatterlist to be returned; returns ERR_PTR
+ * on error. May return -EINTR if it is interrupted by a signal.
+ *
+ * On success, the DMA addresses and lengths in the returned scatterlist are
+ * PAGE_SIZE aligned.
+ *
+ * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
+ * the underlying backing storage is pinned for as long as a mapping exists,
+ * therefore users/importers should not hold onto a mapping for undue amounts of
+ * time.
+ *
+ * Important: Dynamic importers must wait for the exclusive fence of the struct
+ * dma_resv attached to the DMA-BUF first.
+ */
+struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table;
+ int r;
+
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf))
+ return ERR_PTR(-EINVAL);
+
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ if (attach->sgt) {
+ /*
+ * Two mappings with different directions for the same
+ * attachment are not allowed.
+ */
+ if (attach->dir != direction &&
+ attach->dir != DMA_BIDIRECTIONAL)
+ return ERR_PTR(-EBUSY);
+
+ return attach->sgt;
+ }
+
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+ dma_resv_assert_held(attach->dmabuf->resv);
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ r = attach->dmabuf->ops->pin(attach);
+ if (r)
+ return ERR_PTR(r);
+ }
+ }
+
+ sg_table = __map_dma_buf(attach, direction);
+ if (!sg_table)
+ sg_table = ERR_PTR(-ENOMEM);
+
+ if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ attach->dmabuf->ops->unpin(attach);
+
+ if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
+ attach->sgt = sg_table;
+ attach->dir = direction;
+ }
+
+#ifdef CONFIG_DMA_API_DEBUG
+ if (!IS_ERR(sg_table)) {
+ struct scatterlist *sg;
+ u64 addr;
+ int len;
+ int i;
+
+ for_each_sgtable_dma_sg(sg_table, sg, i) {
+ addr = sg_dma_address(sg);
+ len = sg_dma_len(sg);
+ if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
+ pr_debug("%s: addr %llx or len %x is not page aligned!\n",
+ __func__, addr, len);
+ }
+ }
+ }
+#endif /* CONFIG_DMA_API_DEBUG */
+ return sg_table;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, DMA_BUF);
+
+/**
+ * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
+ * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
+ * dma_buf_ops.
+ * @attach: [in] attachment whose scatterlist is to be returned
+ * @direction: [in] direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_map_attachment().
+ */
+struct sg_table *
+dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
+ enum dma_data_direction direction)
+{
+ struct sg_table *sg_table;
+
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf))
+ return ERR_PTR(-EINVAL);
+
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+ sg_table = dma_buf_map_attachment(attach, direction);
+ dma_resv_unlock(attach->dmabuf->resv);
+
+ return sg_table;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, DMA_BUF);
+
+/**
+ * dma_buf_unmap_attachment - unmaps and decreases the usecount of the buffer;
+ * might deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach: [in] attachment to unmap buffer from
+ * @sg_table: [in] scatterlist info of the buffer to unmap
+ * @direction: [in] direction of DMA transfer
+ *
+ * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
+ */
+void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
+{
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
+ return;
+
+ if (dma_buf_attachment_is_dynamic(attach))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ if (attach->sgt == sg_table)
+ return;
+
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_resv_assert_held(attach->dmabuf->resv);
+
+ __unmap_dma_buf(attach, sg_table, direction);
+
+ if (dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ dma_buf_unpin(attach);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, DMA_BUF);
+
+/**
+ * dma_buf_unmap_attachment_unlocked - unmaps and decreases the usecount of the
+ * buffer; might deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
+ * dma_buf_ops.
+ * @attach: [in] attachment to unmap buffer from
+ * @sg_table: [in] scatterlist info of the buffer to unmap
+ * @direction: [in] direction of DMA transfer
+ *
+ * Unlocked variant of dma_buf_unmap_attachment().
+ */
+void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
+ struct sg_table *sg_table,
+ enum dma_data_direction direction)
+{
+ might_sleep();
+
+ if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
+ return;
+
+ dma_resv_lock(attach->dmabuf->resv, NULL);
+ dma_buf_unmap_attachment(attach, sg_table, direction);
+ dma_resv_unlock(attach->dmabuf->resv);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, DMA_BUF);
+
+/**
+ * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ *
+ * @dmabuf: [in] buffer which is moving
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_move_notify(struct dma_buf *dmabuf)
+{
+ struct dma_buf_attachment *attach;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (attach->importer_ops)
+ attach->importer_ops->move_notify(attach);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, DMA_BUF);
+
+/**
+ * DOC: cpu access
+ *
+ * There are multiple reasons for supporting CPU access to a dma buffer object:
+ *
+ * - Fallback operations in the kernel, for example when a device is connected
+ * over USB and the kernel needs to shuffle the data around first before
+ * sending it away. Cache coherency is handled by bracketing any transactions
+ * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
+ *
+ * Since most kernel internal dma-buf accesses need the entire buffer, a
+ * vmap interface is introduced. Note that on very old 32-bit architectures
+ * vmalloc space might be limited and result in vmap calls failing.
+ *
+ * Interfaces::
+ *
+ * void \*dma_buf_vmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
+ * void dma_buf_vunmap(struct dma_buf \*dmabuf, struct iosys_map \*map)
+ *
+ * The vmap call can fail if there is no vmap support in the exporter, or if
+ * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
+ * count for all vmap access and calls down into the exporter's vmap function
+ * only when no vmapping exists, and only unmaps it once. Protection against
+ * concurrent vmap/vunmap calls is provided by taking the &dma_buf.lock mutex.
+ *
+ * - For full compatibility on the importer side with existing userspace
+ * interfaces, which might already support mmap'ing buffers. This is needed in
+ * many processing pipelines (e.g. feeding a software rendered image into a
+ * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
+ * framework already supported this, and mmap support was needed for DMA
+ * buffer file descriptors to replace ION buffers.
+ *
+ * There are no special interfaces; userspace simply calls mmap on the dma-buf
+ * fd. But as with kernel CPU access, there is a need to bracket the actual access,
+ * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
+ * DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
+ * be restarted.
+ *
+ * Some systems might need some sort of cache coherency management e.g. when
+ * CPU and GPU domains are being accessed through dma-buf at the same time.
+ * To circumvent this problem there are begin/end coherency markers, that
+ * forward directly to the existing dma-buf device drivers' vfunc hooks. Userspace
+ * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
+ * sequence would be used like the following:
+ *
+ * - mmap dma-buf fd
+ * - for each drawing/upload cycle on the CPU: 1. SYNC_START ioctl, 2. read/write
+ * to the mmap area, 3. SYNC_END ioctl. This can be repeated as often as you
+ * want (with the new data being consumed by, say, the GPU or the scanout
+ * device)
+ * - munmap once you don't need the buffer any more
+ *
+ * For correctness and optimal performance, it is always required to use
+ * SYNC_START and SYNC_END before and after, respectively, when accessing the
+ * mapped address. Userspace cannot rely on coherent access, even when there
+ * are systems where it just works without calling these ioctls.
+ *
+ * - And as a CPU fallback in userspace processing pipelines.
+ *
+ * Similar to the motivation for kernel cpu access it is again important that
+ * the userspace code of a given importing subsystem can use the same
+ * interfaces with an imported dma-buf buffer object as with a native buffer
+ * object. This is especially important for drm where the userspace part of
+ * contemporary OpenGL, X, and other drivers is huge, and reworking them to
+ * use a different way to mmap a buffer would be rather invasive.
+ *
+ * The assumption in the current dma-buf interfaces is that redirecting the
+ * initial mmap is all that's needed. A survey of some of the existing
+ * subsystems shows that no driver seems to do any nefarious thing like
+ * syncing up with outstanding asynchronous processing on the device or
+ * allocating special resources at fault time. So hopefully this is good
+ * enough, since adding interfaces to intercept pagefaults and allow pte
+ * shootdowns would increase the complexity quite a bit.
+ *
+ * Interface::
+ *
+ * int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
+ * unsigned long);
+ *
+ * If the importing subsystem simply provides a special-purpose mmap call to
+ * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
+ * equally achieve that for a dma-buf object.
+ */
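For illustration only (not part of the patch): a userspace sketch of the mmap plus DMA_BUF_IOCTL_SYNC bracketing sequence described above, using the uapi names from <linux/dma-buf.h>.

/* Illustrative userspace sketch, not part of the patch. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>

static void cpu_fill(int dmabuf_fd, size_t size, uint8_t value)
{
	struct dma_buf_sync sync = { 0 };
	void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, dmabuf_fd, 0);

	/* bracket the CPU access, retrying if the ioctl is interrupted */
	sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
	while (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) == -1 &&
	       (errno == EAGAIN || errno == EINTR))
		;

	memset(map, value, size);

	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
	while (ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync) == -1 &&
	       (errno == EAGAIN || errno == EINTR))
		;

	munmap(map, size);
}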
+
+static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ bool write = (direction == DMA_BIDIRECTIONAL ||
+ direction == DMA_TO_DEVICE);
+ struct dma_resv *resv = dmabuf->resv;
+ long ret;
+
+ /* Wait on any implicit rendering fences */
+ ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
+ true, MAX_SCHEDULE_TIMEOUT);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/**
+ * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
+ * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
+ * preparations. Coherency is only guaranteed for the specified access
+ * direction.
+ * @dmabuf: [in] buffer to prepare cpu access for.
+ * @direction: [in] direction of CPU access.
+ *
+ * After the cpu access is complete the caller should call
+ * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
+ * it guaranteed to be coherent with other DMA access.
+ *
+ * This function will also wait for any DMA transactions tracked through
+ * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
+ * synchronization this function will only ensure cache coherency, callers must
+ * ensure synchronization with such DMA transactions on their own.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ int ret = 0;
+
+ if (WARN_ON(!dmabuf))
+ return -EINVAL;
+
+ might_lock(&dmabuf->resv->lock.base);
+
+ if (dmabuf->ops->begin_cpu_access)
+ ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
+
+ /* Ensure that all fences are waited upon - but we first allow
+ * the native handler the chance to do so more efficiently if it
+ * chooses. A double invocation here will be a reasonably cheap no-op.
+ */
+ if (ret == 0)
+ ret = __dma_buf_begin_cpu_access(dmabuf, direction);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, DMA_BUF);
+
+/**
+ * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
+ * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
+ * actions. Coherency is only guaranteed for the specified access
+ * direction.
+ * @dmabuf: [in] buffer to complete cpu access for.
+ * @direction: [in] direction of CPU access.
+ *
+ * This terminates CPU access started with dma_buf_begin_cpu_access().
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ int ret = 0;
+
+ WARN_ON(!dmabuf);
+
+ might_lock(&dmabuf->resv->lock.base);
+
+ if (dmabuf->ops->end_cpu_access)
+ ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
+
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, DMA_BUF);
+
+
+/**
+ * dma_buf_mmap - Set up a userspace mmap with the given vma
+ * @dmabuf: [in] buffer that should back the vma
+ * @vma: [in] vma for the mmap
+ * @pgoff: [in] offset in pages where this mmap should start within the
+ * dma-buf buffer.
+ *
+ * This function adjusts the passed in vma so that it points at the file of the
+ * dma_buf operation. It also adjusts the starting pgoff and does bounds
+ * checking on the size of the vma. Then it calls the exporter's mmap function to
+ * set up the mapping.
+ *
+ * Can return negative error values, returns 0 on success.
+ */
+int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
+ unsigned long pgoff)
+{
+ if (WARN_ON(!dmabuf || !vma))
+ return -EINVAL;
+
+ /* check if buffer supports mmap */
+ if (!dmabuf->ops->mmap)
+ return -EINVAL;
+
+ /* check for offset overflow */
+ if (pgoff + vma_pages(vma) < pgoff)
+ return -EOVERFLOW;
+
+ /* check for overflowing the buffer's size */
+ if (pgoff + vma_pages(vma) >
+ dmabuf->size >> PAGE_SHIFT)
+ return -EINVAL;
+
+ /* readjust the vma */
+ vma_set_file(vma, dmabuf->file);
+ vma->vm_pgoff = pgoff;
+
+ return dmabuf->ops->mmap(dmabuf, vma);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, DMA_BUF);
+
+/**
+ * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
+ * address space. Same restrictions as for vmap and friends apply.
+ * @dmabuf: [in] buffer to vmap
+ * @map: [out] returns the vmap pointer
+ *
+ * This call may fail due to lack of virtual mapping address space.
+ * These calls are optional in drivers. The intended use for them
+ * is to map objects linearly into kernel address space for heavily used objects.
+ *
+ * To ensure coherency users must call dma_buf_begin_cpu_access() and
+ * dma_buf_end_cpu_access() around any cpu access performed through this
+ * mapping.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ struct iosys_map ptr;
+ int ret = 0;
+
+ iosys_map_clear(map);
+
+ if (WARN_ON(!dmabuf))
+ return -EINVAL;
+
+ if (!dmabuf->ops->vmap)
+ return -EINVAL;
+
+ mutex_lock(&dmabuf->lock);
+ if (dmabuf->vmapping_counter) {
+ dmabuf->vmapping_counter++;
+ BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
+ *map = dmabuf->vmap_ptr;
+ goto out_unlock;
+ }
+
+ BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
+
+ ret = dmabuf->ops->vmap(dmabuf, &ptr);
+ if (WARN_ON_ONCE(ret))
+ goto out_unlock;
+
+ dmabuf->vmap_ptr = ptr;
+ dmabuf->vmapping_counter = 1;
+
+ *map = dmabuf->vmap_ptr;
+
+out_unlock:
+ mutex_unlock(&dmabuf->lock);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, DMA_BUF);
+
+/**
+ * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
+ * @dmabuf: [in] buffer to vunmap
+ * @map: [in] vmap pointer to vunmap
+ */
+void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ if (WARN_ON(!dmabuf))
+ return;
+
+ BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
+ BUG_ON(dmabuf->vmapping_counter == 0);
+ BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
+
+ mutex_lock(&dmabuf->lock);
+ if (--dmabuf->vmapping_counter == 0) {
+ if (dmabuf->ops->vunmap)
+ dmabuf->ops->vunmap(dmabuf, map);
+ iosys_map_clear(&dmabuf->vmap_ptr);
+ }
+ mutex_unlock(&dmabuf->lock);
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
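For illustration only (not part of the patch): a kernel-side sketch combining dma_buf_vmap()/dma_buf_vunmap() with the CPU-access bracketing documented above. It assumes the exporter supports vmap and that the mapping is in system memory.

/* Illustrative sketch, not part of the patch. */
static int example_cpu_clear(struct dma_buf *dmabuf)
{
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap(dmabuf, &map);	/* refcounted; fails without vmap support */
	if (ret)
		return ret;

	ret = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
	if (ret)
		goto out_vunmap;

	if (!map.is_iomem)			/* assume system memory for a plain memset */
		memset(map.vaddr, 0, dmabuf->size);

	ret = dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);

out_vunmap:
	dma_buf_vunmap(dmabuf, &map);
	return ret;
}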
+
+#ifdef CONFIG_DEBUG_FS
+static int dma_buf_debug_show(struct seq_file *s, void *unused)
+{
+ struct dma_buf *buf_obj;
+ struct dma_buf_attachment *attach_obj;
+ int count = 0, attach_count;
+ size_t size = 0;
+ int ret;
+
+ ret = mutex_lock_interruptible(&db_list.lock);
+
+ if (ret)
+ return ret;
+
+ seq_puts(s, "\nDma-buf Objects:\n");
+ seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
+ "size", "flags", "mode", "count", "ino");
+
+ list_for_each_entry(buf_obj, &db_list.head, list_node) {
+
+ ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
+ if (ret)
+ goto error_unlock;
+
+
+ spin_lock(&buf_obj->name_lock);
+ seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
+ buf_obj->size,
+ buf_obj->file->f_flags, buf_obj->file->f_mode,
+ file_count(buf_obj->file),
+ buf_obj->exp_name,
+ file_inode(buf_obj->file)->i_ino,
+ buf_obj->name ?: "<none>");
+ spin_unlock(&buf_obj->name_lock);
+
+ dma_resv_describe(buf_obj->resv, s);
+
+ seq_puts(s, "\tAttached Devices:\n");
+ attach_count = 0;
+
+ list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
+ seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
+ attach_count++;
+ }
+ dma_resv_unlock(buf_obj->resv);
+
+ seq_printf(s, "Total %d devices attached\n\n",
+ attach_count);
+
+ count++;
+ size += buf_obj->size;
+ }
+
+ seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
+
+ mutex_unlock(&db_list.lock);
+ return 0;
+
+error_unlock:
+ mutex_unlock(&db_list.lock);
+ return ret;
+}
+
+DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
+
+static struct dentry *dma_buf_debugfs_dir;
+
+static int dma_buf_init_debugfs(void)
+{
+ struct dentry *d;
+ int err = 0;
+
+ d = debugfs_create_dir("dma_buf", NULL);
+ if (IS_ERR(d))
+ return PTR_ERR(d);
+
+ dma_buf_debugfs_dir = d;
+
+ d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
+ NULL, &dma_buf_debug_fops);
+ if (IS_ERR(d)) {
+ pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
+ debugfs_remove_recursive(dma_buf_debugfs_dir);
+ dma_buf_debugfs_dir = NULL;
+ err = PTR_ERR(d);
+ }
+
+ return err;
+}
+
+static void dma_buf_uninit_debugfs(void)
+{
+ debugfs_remove_recursive(dma_buf_debugfs_dir);
+}
+#else
+static inline int dma_buf_init_debugfs(void)
+{
+ return 0;
+}
+static inline void dma_buf_uninit_debugfs(void)
+{
+}
+#endif
+
+static int __init dma_buf_init(void)
+{
+ int ret;
+
+ ret = dma_buf_init_sysfs_statistics();
+ if (ret)
+ return ret;
+
+ dma_buf_mnt = kern_mount(&dma_buf_fs_type);
+ if (IS_ERR(dma_buf_mnt))
+ return PTR_ERR(dma_buf_mnt);
+
+ mutex_init(&db_list.lock);
+ INIT_LIST_HEAD(&db_list.head);
+ dma_buf_init_debugfs();
+ return 0;
+}
+subsys_initcall(dma_buf_init);
+
+static void __exit dma_buf_deinit(void)
+{
+ dma_buf_uninit_debugfs();
+ kern_unmount(dma_buf_mnt);
+ dma_buf_uninit_sysfs_statistics();
+}
+__exitcall(dma_buf_deinit);
diff --git a/drivers/dma-buf/dma-fence-array.c b/drivers/dma-buf/dma-fence-array.c
new file mode 100644
index 000000000..5c8a70845
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-array.c
@@ -0,0 +1,253 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dma-fence-array: aggregate fences to be waited together
+ *
+ * Copyright (C) 2016 Collabora Ltd
+ * Copyright (C) 2016 Advanced Micro Devices, Inc.
+ * Authors:
+ * Gustavo Padovan <gustavo@padovan.org>
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <linux/dma-fence-array.h>
+
+#define PENDING_ERROR 1
+
+static const char *dma_fence_array_get_driver_name(struct dma_fence *fence)
+{
+ return "dma_fence_array";
+}
+
+static const char *dma_fence_array_get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static void dma_fence_array_set_pending_error(struct dma_fence_array *array,
+ int error)
+{
+ /*
+ * Propagate the first error reported by any of our fences, but only
+ * before we ourselves are signaled.
+ */
+ if (error)
+ cmpxchg(&array->base.error, PENDING_ERROR, error);
+}
+
+static void dma_fence_array_clear_pending_error(struct dma_fence_array *array)
+{
+ /* Clear the error flag if not actually set. */
+ cmpxchg(&array->base.error, PENDING_ERROR, 0);
+}
+
+static void irq_dma_fence_array_work(struct irq_work *wrk)
+{
+ struct dma_fence_array *array = container_of(wrk, typeof(*array), work);
+
+ dma_fence_array_clear_pending_error(array);
+
+ dma_fence_signal(&array->base);
+ dma_fence_put(&array->base);
+}
+
+static void dma_fence_array_cb_func(struct dma_fence *f,
+ struct dma_fence_cb *cb)
+{
+ struct dma_fence_array_cb *array_cb =
+ container_of(cb, struct dma_fence_array_cb, cb);
+ struct dma_fence_array *array = array_cb->array;
+
+ dma_fence_array_set_pending_error(array, f->error);
+
+ if (atomic_dec_and_test(&array->num_pending))
+ irq_work_queue(&array->work);
+ else
+ dma_fence_put(&array->base);
+}
+
+static bool dma_fence_array_enable_signaling(struct dma_fence *fence)
+{
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ struct dma_fence_array_cb *cb = (void *)(&array[1]);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i) {
+ cb[i].array = array;
+ /*
+ * As we may report that the fence is signaled before all
+ * callbacks are complete, we need to take an additional
+ * reference count on the array so that we do not free it too
+ * early. The core fence handling will only hold the reference
+ * until we signal the array as complete (but that is now
+ * insufficient).
+ */
+ dma_fence_get(&array->base);
+ if (dma_fence_add_callback(array->fences[i], &cb[i].cb,
+ dma_fence_array_cb_func)) {
+ int error = array->fences[i]->error;
+
+ dma_fence_array_set_pending_error(array, error);
+ dma_fence_put(&array->base);
+ if (atomic_dec_and_test(&array->num_pending)) {
+ dma_fence_array_clear_pending_error(array);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool dma_fence_array_signaled(struct dma_fence *fence)
+{
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+
+ if (atomic_read(&array->num_pending) > 0)
+ return false;
+
+ dma_fence_array_clear_pending_error(array);
+ return true;
+}
+
+static void dma_fence_array_release(struct dma_fence *fence)
+{
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ unsigned i;
+
+ for (i = 0; i < array->num_fences; ++i)
+ dma_fence_put(array->fences[i]);
+
+ kfree(array->fences);
+ dma_fence_free(fence);
+}
+
+const struct dma_fence_ops dma_fence_array_ops = {
+ .get_driver_name = dma_fence_array_get_driver_name,
+ .get_timeline_name = dma_fence_array_get_timeline_name,
+ .enable_signaling = dma_fence_array_enable_signaling,
+ .signaled = dma_fence_array_signaled,
+ .release = dma_fence_array_release,
+};
+EXPORT_SYMBOL(dma_fence_array_ops);
+
+/**
+ * dma_fence_array_create - Create a custom fence array
+ * @num_fences: [in] number of fences to add in the array
+ * @fences: [in] array containing the fences
+ * @context: [in] fence context to use
+ * @seqno: [in] sequence number to use
+ * @signal_on_any: [in] signal on any fence in the array
+ *
+ * Allocate a dma_fence_array object and initialize the base fence with
+ * dma_fence_init().
+ * In case of error it returns NULL.
+ *
+ * The caller should allocate the fences array with num_fences size
+ * and fill it with the fences it wants to add to the object. Ownership of this
+ * array is taken and dma_fence_put() is used on each fence on release.
+ *
+ * If @signal_on_any is true the fence array signals if any fence in the array
+ * signals, otherwise it signals when all fences in the array signal.
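+ *
+ * A hedged usage sketch (error handling elided; fence_a and fence_b stand in
+ * for fences the caller wants to aggregate, and the array takes ownership of
+ * the allocated @fences array and of the references taken here)::
+ *
+ *     struct dma_fence **fences;
+ *     struct dma_fence_array *array;
+ *
+ *     fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
+ *     fences[0] = dma_fence_get(fence_a);
+ *     fences[1] = dma_fence_get(fence_b);
+ *     array = dma_fence_array_create(2, fences,
+ *                                    dma_fence_context_alloc(1), 1, false);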
+ */
+struct dma_fence_array *dma_fence_array_create(int num_fences,
+ struct dma_fence **fences,
+ u64 context, unsigned seqno,
+ bool signal_on_any)
+{
+ struct dma_fence_array *array;
+ size_t size = sizeof(*array);
+
+ WARN_ON(!num_fences || !fences);
+
+ /* Allocate the callback structures behind the array. */
+ size += num_fences * sizeof(struct dma_fence_array_cb);
+ array = kzalloc(size, GFP_KERNEL);
+ if (!array)
+ return NULL;
+
+ spin_lock_init(&array->lock);
+ dma_fence_init(&array->base, &dma_fence_array_ops, &array->lock,
+ context, seqno);
+ init_irq_work(&array->work, irq_dma_fence_array_work);
+
+ array->num_fences = num_fences;
+ atomic_set(&array->num_pending, signal_on_any ? 1 : num_fences);
+ array->fences = fences;
+
+ array->base.error = PENDING_ERROR;
+
+ /*
+ * dma_fence_array objects should never contain any other fence
+ * containers or otherwise we run into recursion and potential kernel
+ * stack overflow on operations on the dma_fence_array.
+ *
+ * The correct way of handling this is to flatten out the array by the
+ * caller instead.
+ *
+ * Enforce this here by checking that we don't create a dma_fence_array
+ * with any container inside.
+ */
+ while (num_fences--)
+ WARN_ON(dma_fence_is_container(fences[num_fences]));
+
+ return array;
+}
+EXPORT_SYMBOL(dma_fence_array_create);
+
+/**
+ * dma_fence_match_context - Check if all fences are from the given context
+ * @fence: [in] fence or fence array
+ * @context: [in] fence context to check all fences against
+ *
+ * Checks the provided fence or, for a fence array, all fences in the array
+ * against the given context. Returns false if any fence is from a different
+ * context.
+ */
+bool dma_fence_match_context(struct dma_fence *fence, u64 context)
+{
+ struct dma_fence_array *array = to_dma_fence_array(fence);
+ unsigned i;
+
+ if (!dma_fence_is_array(fence))
+ return fence->context == context;
+
+ for (i = 0; i < array->num_fences; i++) {
+ if (array->fences[i]->context != context)
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(dma_fence_match_context);
+
+struct dma_fence *dma_fence_array_first(struct dma_fence *head)
+{
+ struct dma_fence_array *array;
+
+ if (!head)
+ return NULL;
+
+ array = to_dma_fence_array(head);
+ if (!array)
+ return head;
+
+ if (!array->num_fences)
+ return NULL;
+
+ return array->fences[0];
+}
+EXPORT_SYMBOL(dma_fence_array_first);
+
+struct dma_fence *dma_fence_array_next(struct dma_fence *head,
+ unsigned int index)
+{
+ struct dma_fence_array *array = to_dma_fence_array(head);
+
+ if (!array || index >= array->num_fences)
+ return NULL;
+
+ return array->fences[index];
+}
+EXPORT_SYMBOL(dma_fence_array_next);
diff --git a/drivers/dma-buf/dma-fence-chain.c b/drivers/dma-buf/dma-fence-chain.c
new file mode 100644
index 000000000..a0d920576
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-chain.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * fence-chain: chain fences together in a timeline
+ *
+ * Copyright (C) 2018 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/dma-fence-chain.h>
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence);
+
+/**
+ * dma_fence_chain_get_prev - use RCU to get a reference to the previous fence
+ * @chain: chain node to get the previous node from
+ *
+ * Use dma_fence_get_rcu_safe to get a reference to the previous fence of the
+ * chain node.
+ */
+static struct dma_fence *dma_fence_chain_get_prev(struct dma_fence_chain *chain)
+{
+ struct dma_fence *prev;
+
+ rcu_read_lock();
+ prev = dma_fence_get_rcu_safe(&chain->prev);
+ rcu_read_unlock();
+ return prev;
+}
+
+/**
+ * dma_fence_chain_walk - chain walking function
+ * @fence: current chain node
+ *
+ * Walk the chain to the next node. Returns the next fence or NULL if we are at
+ * the end of the chain. Garbage collects chain nodes which are already
+ * signaled.
+ */
+struct dma_fence *dma_fence_chain_walk(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain, *prev_chain;
+ struct dma_fence *prev, *replacement, *tmp;
+
+ chain = to_dma_fence_chain(fence);
+ if (!chain) {
+ dma_fence_put(fence);
+ return NULL;
+ }
+
+ while ((prev = dma_fence_chain_get_prev(chain))) {
+
+ prev_chain = to_dma_fence_chain(prev);
+ if (prev_chain) {
+ if (!dma_fence_is_signaled(prev_chain->fence))
+ break;
+
+ replacement = dma_fence_chain_get_prev(prev_chain);
+ } else {
+ if (!dma_fence_is_signaled(prev))
+ break;
+
+ replacement = NULL;
+ }
+
+ tmp = unrcu_pointer(cmpxchg(&chain->prev, RCU_INITIALIZER(prev),
+ RCU_INITIALIZER(replacement)));
+ if (tmp == prev)
+ dma_fence_put(tmp);
+ else
+ dma_fence_put(replacement);
+ dma_fence_put(prev);
+ }
+
+ dma_fence_put(fence);
+ return prev;
+}
+EXPORT_SYMBOL(dma_fence_chain_walk);
+
+/**
+ * dma_fence_chain_find_seqno - find fence chain node by seqno
+ * @pfence: pointer to the chain node where to start
+ * @seqno: the sequence number to search for
+ *
+ * Advance the fence pointer to the chain node which will signal this sequence
+ * number. If no sequence number is provided then this is a no-op.
+ *
+ * Returns -EINVAL if the fence is not a chain node or the sequence number has
+ * not yet advanced far enough.
+ */
+int dma_fence_chain_find_seqno(struct dma_fence **pfence, uint64_t seqno)
+{
+ struct dma_fence_chain *chain;
+
+ if (!seqno)
+ return 0;
+
+ chain = to_dma_fence_chain(*pfence);
+ if (!chain || chain->base.seqno < seqno)
+ return -EINVAL;
+
+ dma_fence_chain_for_each(*pfence, &chain->base) {
+ if ((*pfence)->context != chain->base.context ||
+ to_dma_fence_chain(*pfence)->prev_seqno < seqno)
+ break;
+ }
+ dma_fence_put(&chain->base);
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_fence_chain_find_seqno);
+
+static const char *dma_fence_chain_get_driver_name(struct dma_fence *fence)
+{
+ return "dma_fence_chain";
+}
+
+static const char *dma_fence_chain_get_timeline_name(struct dma_fence *fence)
+{
+ return "unbound";
+}
+
+static void dma_fence_chain_irq_work(struct irq_work *work)
+{
+ struct dma_fence_chain *chain;
+
+ chain = container_of(work, typeof(*chain), work);
+
+ /* Try to rearm the callback */
+ if (!dma_fence_chain_enable_signaling(&chain->base))
+ /* Ok, we are done. No more unsignaled fences left */
+ dma_fence_signal(&chain->base);
+ dma_fence_put(&chain->base);
+}
+
+static void dma_fence_chain_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct dma_fence_chain *chain;
+
+ chain = container_of(cb, typeof(*chain), cb);
+ init_irq_work(&chain->work, dma_fence_chain_irq_work);
+ irq_work_queue(&chain->work);
+ dma_fence_put(f);
+}
+
+static bool dma_fence_chain_enable_signaling(struct dma_fence *fence)
+{
+ struct dma_fence_chain *head = to_dma_fence_chain(fence);
+
+ dma_fence_get(&head->base);
+ dma_fence_chain_for_each(fence, &head->base) {
+ struct dma_fence *f = dma_fence_chain_contained(fence);
+
+ dma_fence_get(f);
+ if (!dma_fence_add_callback(f, &head->cb, dma_fence_chain_cb)) {
+ dma_fence_put(fence);
+ return true;
+ }
+ dma_fence_put(f);
+ }
+ dma_fence_put(&head->base);
+ return false;
+}
+
+static bool dma_fence_chain_signaled(struct dma_fence *fence)
+{
+ dma_fence_chain_for_each(fence, fence) {
+ struct dma_fence *f = dma_fence_chain_contained(fence);
+
+ if (!dma_fence_is_signaled(f)) {
+ dma_fence_put(fence);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void dma_fence_chain_release(struct dma_fence *fence)
+{
+ struct dma_fence_chain *chain = to_dma_fence_chain(fence);
+ struct dma_fence *prev;
+
+ /* Manually unlink the chain as much as possible to avoid recursion
+ * and potential stack overflow.
+ */
+ while ((prev = rcu_dereference_protected(chain->prev, true))) {
+ struct dma_fence_chain *prev_chain;
+
+ if (kref_read(&prev->refcount) > 1)
+ break;
+
+ prev_chain = to_dma_fence_chain(prev);
+ if (!prev_chain)
+ break;
+
+ /* No need for atomic operations since we hold the last
+ * reference to prev_chain.
+ */
+ chain->prev = prev_chain->prev;
+ RCU_INIT_POINTER(prev_chain->prev, NULL);
+ dma_fence_put(prev);
+ }
+ dma_fence_put(prev);
+
+ dma_fence_put(chain->fence);
+ dma_fence_free(fence);
+}
+
+const struct dma_fence_ops dma_fence_chain_ops = {
+ .use_64bit_seqno = true,
+ .get_driver_name = dma_fence_chain_get_driver_name,
+ .get_timeline_name = dma_fence_chain_get_timeline_name,
+ .enable_signaling = dma_fence_chain_enable_signaling,
+ .signaled = dma_fence_chain_signaled,
+ .release = dma_fence_chain_release,
+};
+EXPORT_SYMBOL(dma_fence_chain_ops);
+
+/**
+ * dma_fence_chain_init - initialize a fence chain
+ * @chain: the chain node to initialize
+ * @prev: the previous fence
+ * @fence: the current fence
+ * @seqno: the sequence number to use for the fence chain
+ *
+ * Initialize a new chain node and either start a new chain or add the node to
+ * the existing chain of the previous fence.
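+ *
+ * A hedged sketch of appending a point to a timeline, assuming
+ * dma_fence_chain_alloc() from <linux/dma-fence-chain.h> and that the
+ * references to @prev and @fence are handed over to the new node::
+ *
+ *     struct dma_fence_chain *node;
+ *
+ *     node = dma_fence_chain_alloc();
+ *     if (!node)
+ *         return -ENOMEM;
+ *     dma_fence_chain_init(node, prev, fence, seqno);
+ *     prev = &node->base;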
+ */
+void dma_fence_chain_init(struct dma_fence_chain *chain,
+ struct dma_fence *prev,
+ struct dma_fence *fence,
+ uint64_t seqno)
+{
+ struct dma_fence_chain *prev_chain = to_dma_fence_chain(prev);
+ uint64_t context;
+
+ spin_lock_init(&chain->lock);
+ rcu_assign_pointer(chain->prev, prev);
+ chain->fence = fence;
+ chain->prev_seqno = 0;
+
+ /* Try to reuse the context of the previous chain node. */
+ if (prev_chain && __dma_fence_is_later(seqno, prev->seqno, prev->ops)) {
+ context = prev->context;
+ chain->prev_seqno = prev->seqno;
+ } else {
+ context = dma_fence_context_alloc(1);
+ /* Make sure that we always have a valid sequence number. */
+ if (prev_chain)
+ seqno = max(prev->seqno, seqno);
+ }
+
+ dma_fence_init(&chain->base, &dma_fence_chain_ops,
+ &chain->lock, context, seqno);
+
+ /*
+	 * Chaining dma_fence_chain containers together is only allowed through
+ * the prev fence and not through the contained fence.
+ *
+ * The correct way of handling this is to flatten out the fence
+ * structure into a dma_fence_array by the caller instead.
+ */
+ WARN_ON(dma_fence_is_chain(fence));
+}
+EXPORT_SYMBOL(dma_fence_chain_init);
diff --git a/drivers/dma-buf/dma-fence-unwrap.c b/drivers/dma-buf/dma-fence-unwrap.c
new file mode 100644
index 000000000..628af51c8
--- /dev/null
+++ b/drivers/dma-buf/dma-fence-unwrap.c
@@ -0,0 +1,176 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * dma-fence-util: misc functions for dma_fence objects
+ *
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ * Authors:
+ * Christian König <christian.koenig@amd.com>
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-fence-unwrap.h>
+#include <linux/slab.h>
+
+/* Internal helper to start new array iteration, don't use directly */
+static struct dma_fence *
+__dma_fence_unwrap_array(struct dma_fence_unwrap *cursor)
+{
+ cursor->array = dma_fence_chain_contained(cursor->chain);
+ cursor->index = 0;
+ return dma_fence_array_first(cursor->array);
+}
+
+/**
+ * dma_fence_unwrap_first - return the first fence from fence containers
+ * @head: the entrypoint into the containers
+ * @cursor: current position inside the containers
+ *
+ * Unwraps potential dma_fence_chain/dma_fence_array containers and returns
+ * the first fence.
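+ *
+ * Most users will want to go through the dma_fence_unwrap_for_each() helper
+ * instead of calling this directly; a hedged sketch::
+ *
+ *     struct dma_fence_unwrap iter;
+ *     struct dma_fence *f;
+ *
+ *     dma_fence_unwrap_for_each(f, &iter, fence)
+ *         pr_info("leaf %llu:%llu\n", f->context, f->seqno);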
+ */
+struct dma_fence *dma_fence_unwrap_first(struct dma_fence *head,
+ struct dma_fence_unwrap *cursor)
+{
+ cursor->chain = dma_fence_get(head);
+ return __dma_fence_unwrap_array(cursor);
+}
+EXPORT_SYMBOL_GPL(dma_fence_unwrap_first);
+
+/**
+ * dma_fence_unwrap_next - return the next fence from the fence containers
+ * @cursor: current position inside the containers
+ *
+ * Continues unwrapping the dma_fence_chain/dma_fence_array containers and
+ * returns the next fence from them.
+ */
+struct dma_fence *dma_fence_unwrap_next(struct dma_fence_unwrap *cursor)
+{
+ struct dma_fence *tmp;
+
+ ++cursor->index;
+ tmp = dma_fence_array_next(cursor->array, cursor->index);
+ if (tmp)
+ return tmp;
+
+ cursor->chain = dma_fence_chain_walk(cursor->chain);
+ return __dma_fence_unwrap_array(cursor);
+}
+EXPORT_SYMBOL_GPL(dma_fence_unwrap_next);
+
+/* Implementation for the dma_fence_merge() macro, don't use directly */
+struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
+ struct dma_fence **fences,
+ struct dma_fence_unwrap *iter)
+{
+ struct dma_fence_array *result;
+ struct dma_fence *tmp, **array;
+ ktime_t timestamp;
+ unsigned int i;
+ size_t count;
+
+ count = 0;
+ timestamp = ns_to_ktime(0);
+ for (i = 0; i < num_fences; ++i) {
+ dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
+ if (!dma_fence_is_signaled(tmp)) {
+ ++count;
+ } else {
+ ktime_t t = dma_fence_timestamp(tmp);
+
+ if (ktime_after(t, timestamp))
+ timestamp = t;
+ }
+ }
+ }
+
+ /*
+ * If we couldn't find a pending fence just return a private signaled
+ * fence with the timestamp of the last signaled one.
+ */
+ if (count == 0)
+ return dma_fence_allocate_private_stub(timestamp);
+
+ array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
+ if (!array)
+ return NULL;
+
+ /*
+ * This trashes the input fence array and uses it as position for the
+ * following merge loop. This works because the dma_fence_merge()
+ * wrapper macro is creating this temporary array on the stack together
+ * with the iterators.
+ */
+ for (i = 0; i < num_fences; ++i)
+ fences[i] = dma_fence_unwrap_first(fences[i], &iter[i]);
+
+ count = 0;
+ do {
+ unsigned int sel;
+
+restart:
+ tmp = NULL;
+ for (i = 0; i < num_fences; ++i) {
+ struct dma_fence *next;
+
+ while (fences[i] && dma_fence_is_signaled(fences[i]))
+ fences[i] = dma_fence_unwrap_next(&iter[i]);
+
+ next = fences[i];
+ if (!next)
+ continue;
+
+ /*
+			 * We can't guarantee that input fences are ordered by
+ * context, but it is still quite likely when this
+ * function is used multiple times. So attempt to order
+ * the fences by context as we pass over them and merge
+ * fences with the same context.
+ */
+ if (!tmp || tmp->context > next->context) {
+ tmp = next;
+ sel = i;
+
+ } else if (tmp->context < next->context) {
+ continue;
+
+ } else if (dma_fence_is_later(tmp, next)) {
+ fences[i] = dma_fence_unwrap_next(&iter[i]);
+ goto restart;
+ } else {
+ fences[sel] = dma_fence_unwrap_next(&iter[sel]);
+ goto restart;
+ }
+ }
+
+ if (tmp) {
+ array[count++] = dma_fence_get(tmp);
+ fences[sel] = dma_fence_unwrap_next(&iter[sel]);
+ }
+ } while (tmp);
+
+ if (count == 0) {
+ tmp = dma_fence_allocate_private_stub(ktime_get());
+ goto return_tmp;
+ }
+
+ if (count == 1) {
+ tmp = array[0];
+ goto return_tmp;
+ }
+
+ result = dma_fence_array_create(count, array,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!result) {
+ tmp = NULL;
+ goto return_tmp;
+ }
+ return &result->base;
+
+return_tmp:
+ kfree(array);
+ return tmp;
+}
+EXPORT_SYMBOL_GPL(__dma_fence_unwrap_merge);
diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
new file mode 100644
index 000000000..eef4786aa
--- /dev/null
+++ b/drivers/dma-buf/dma-fence.c
@@ -0,0 +1,965 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Fence mechanism for dma-buf and to allow for asynchronous dma access
+ *
+ * Copyright (C) 2012 Canonical Ltd
+ * Copyright (C) 2012 Texas Instruments
+ *
+ * Authors:
+ * Rob Clark <robdclark@gmail.com>
+ * Maarten Lankhorst <maarten.lankhorst@canonical.com>
+ */
+
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/atomic.h>
+#include <linux/dma-fence.h>
+#include <linux/sched/signal.h>
+#include <linux/seq_file.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/dma_fence.h>
+
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_emit);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_enable_signal);
+EXPORT_TRACEPOINT_SYMBOL(dma_fence_signaled);
+
+static DEFINE_SPINLOCK(dma_fence_stub_lock);
+static struct dma_fence dma_fence_stub;
+
+/*
+ * fence context counter: each execution context should have its own
+ * fence context, this allows checking if fences belong to the same
+ * context or not. One device can have multiple separate contexts,
+ * and they're used if some engine can run independently of another.
+ */
+static atomic64_t dma_fence_context_counter = ATOMIC64_INIT(1);
+
+/**
+ * DOC: DMA fences overview
+ *
+ * DMA fences, represented by &struct dma_fence, are the kernel internal
+ * synchronization primitive for DMA operations like GPU rendering, video
+ * encoding/decoding, or displaying buffers on a screen.
+ *
+ * A fence is initialized using dma_fence_init() and completed using
+ * dma_fence_signal(). Fences are associated with a context, allocated through
+ * dma_fence_context_alloc(), and all fences on the same context are
+ * fully ordered.
+ *
+ * Since the purpose of fences is to facilitate cross-device and
+ * cross-application synchronization, there are multiple ways to use one:
+ *
+ * - Individual fences can be exposed as a &sync_file, accessed as a file
+ * descriptor from userspace, created by calling sync_file_create(). This is
+ * called explicit fencing, since userspace passes around explicit
+ * synchronization points.
+ *
+ * - Some subsystems also have their own explicit fencing primitives, like
+ * &drm_syncobj. Compared to &sync_file, a &drm_syncobj allows the underlying
+ * fence to be updated.
+ *
+ * - Then there's also implicit fencing, where the synchronization points are
+ * implicitly passed around as part of shared &dma_buf instances. Such
+ * implicit fences are stored in &struct dma_resv through the
+ * &dma_buf.resv pointer.
+ */
+
+/**
+ * DOC: fence cross-driver contract
+ *
+ * Since &dma_fence provides a cross-driver contract, all drivers must follow
+ * the same rules:
+ *
+ * * Fences must complete in a reasonable time. Fences which represent kernels
+ * and shaders submitted by userspace, which could run forever, must be backed
+ * up by timeout and gpu hang recovery code. Minimally that code must prevent
+ * further command submission and force complete all in-flight fences, e.g.
+ * when the driver or hardware do not support gpu reset, or if the gpu reset
+ * failed for some reason. Ideally the driver supports gpu recovery which only
+ * affects the offending userspace context, and no other userspace
+ * submissions.
+ *
+ * * Drivers may have different ideas of what completion within a reasonable
+ * time means. Some hang recovery code uses a fixed timeout, others a mix
+ * between observing forward progress and increasingly strict timeouts.
+ * Drivers should not try to second guess timeout handling of fences from
+ * other drivers.
+ *
+ * * To ensure there's no deadlocks of dma_fence_wait() against other locks
+ * drivers should annotate all code required to reach dma_fence_signal(),
+ * which completes the fences, with dma_fence_begin_signalling() and
+ * dma_fence_end_signalling().
+ *
+ * * Drivers are allowed to call dma_fence_wait() while holding dma_resv_lock().
+ * This means any code required for fence completion cannot acquire a
+ * &dma_resv lock. Note that this also pulls in the entire established
+ * locking hierarchy around dma_resv_lock() and dma_resv_unlock().
+ *
+ * * Drivers are allowed to call dma_fence_wait() from their &shrinker
+ * callbacks. This means any code required for fence completion cannot
+ * allocate memory with GFP_KERNEL.
+ *
+ * * Drivers are allowed to call dma_fence_wait() from their &mmu_notifier
+ * respectively &mmu_interval_notifier callbacks. This means any code required
+ * for fence completion cannot allocate memory with GFP_NOFS or GFP_NOIO.
+ * Only GFP_ATOMIC is permissible, which might fail.
+ *
+ * Note that only GPU drivers have a reasonable excuse for both requiring
+ * &mmu_interval_notifier and &shrinker callbacks at the same time as having to
+ * track asynchronous compute work using &dma_fence. No driver outside of
+ * drivers/gpu should ever call dma_fence_wait() in such contexts.
+ */
+
+static const char *dma_fence_stub_get_name(struct dma_fence *fence)
+{
+ return "stub";
+}
+
+static const struct dma_fence_ops dma_fence_stub_ops = {
+ .get_driver_name = dma_fence_stub_get_name,
+ .get_timeline_name = dma_fence_stub_get_name,
+};
+
+/**
+ * dma_fence_get_stub - return a signaled fence
+ *
+ * Return a stub fence which is already signaled. The fence's
+ * timestamp corresponds to the first time after boot this
+ * function is called.
+ */
+struct dma_fence *dma_fence_get_stub(void)
+{
+ spin_lock(&dma_fence_stub_lock);
+ if (!dma_fence_stub.ops) {
+ dma_fence_init(&dma_fence_stub,
+ &dma_fence_stub_ops,
+ &dma_fence_stub_lock,
+ 0, 0);
+
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &dma_fence_stub.flags);
+
+ dma_fence_signal_locked(&dma_fence_stub);
+ }
+ spin_unlock(&dma_fence_stub_lock);
+
+ return dma_fence_get(&dma_fence_stub);
+}
+EXPORT_SYMBOL(dma_fence_get_stub);
+
+/**
+ * dma_fence_allocate_private_stub - return a private, signaled fence
+ * @timestamp: timestamp when the fence was signaled
+ *
+ * Return a newly allocated and signaled stub fence.
+ */
+struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
+{
+ struct dma_fence *fence;
+
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (fence == NULL)
+ return NULL;
+
+ dma_fence_init(fence,
+ &dma_fence_stub_ops,
+ &dma_fence_stub_lock,
+ 0, 0);
+
+ set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
+
+ dma_fence_signal_timestamp(fence, timestamp);
+
+ return fence;
+}
+EXPORT_SYMBOL(dma_fence_allocate_private_stub);
+
+/**
+ * dma_fence_context_alloc - allocate an array of fence contexts
+ * @num: amount of contexts to allocate
+ *
+ * This function returns the first of the @num newly allocated fence contexts.
+ * The fence context is used for setting &dma_fence.context to a
+ * unique number by passing the context to dma_fence_init().
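+ *
+ * A hedged sketch (ring is an illustrative driver-local structure): drivers
+ * typically allocate one context per engine or ring at init time and then
+ * number the fences they emit on it::
+ *
+ *     ring->fence_context = dma_fence_context_alloc(1);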
+ */
+u64 dma_fence_context_alloc(unsigned num)
+{
+ WARN_ON(!num);
+ return atomic64_fetch_add(num, &dma_fence_context_counter);
+}
+EXPORT_SYMBOL(dma_fence_context_alloc);
+
+/**
+ * DOC: fence signalling annotation
+ *
+ * Proving correctness of all the kernel code around &dma_fence through code
+ * review and testing is tricky for a few reasons:
+ *
+ * * It is a cross-driver contract, and therefore all drivers must follow the
+ * same rules for lock nesting order, calling contexts for various functions
+ * and anything else significant for in-kernel interfaces. But it is also
+ * impossible to test all drivers in a single machine, hence brute-force N vs.
+ * N testing of all combinations is impossible. Even just limiting to the
+ * possible combinations is infeasible.
+ *
+ * * There is an enormous amount of driver code involved. For render drivers
+ * there's the tail of command submission, after fences are published,
+ * scheduler code, interrupt and workers to process job completion,
+ * and timeout, gpu reset and gpu hang recovery code. Plus for integration
+ * with core mm we have &mmu_notifier, respectively &mmu_interval_notifier,
+ * and &shrinker. For modesetting drivers there's the commit tail functions
+ * between when fences for an atomic modeset are published, and when the
+ * corresponding vblank completes, including any interrupt processing and
+ * related workers. Auditing all that code, across all drivers, is not
+ * feasible.
+ *
+ * * Due to how many other subsystems are involved and the locking hierarchies
+ * this pulls in there is extremely thin wiggle-room for driver-specific
+ * differences. &dma_fence interacts with almost all of the core memory
+ * handling through page fault handlers via &dma_resv, dma_resv_lock() and
+ * dma_resv_unlock(). On the other side it also interacts through all
+ * allocation sites through &mmu_notifier and &shrinker.
+ *
+ * Furthermore lockdep does not handle cross-release dependencies, which means
+ * any deadlocks between dma_fence_wait() and dma_fence_signal() can't be caught
+ * at runtime with some quick testing. The simplest example is one thread
+ * waiting on a &dma_fence while holding a lock::
+ *
+ * lock(A);
+ * dma_fence_wait(B);
+ * unlock(A);
+ *
+ * while the other thread is stuck trying to acquire the same lock, which
+ * prevents it from signalling the fence the previous thread is stuck waiting
+ * on::
+ *
+ * lock(A);
+ * unlock(A);
+ * dma_fence_signal(B);
+ *
+ * By manually annotating all code relevant to signalling a &dma_fence we can
+ * teach lockdep about these dependencies, which also helps with the validation
+ * headache since now lockdep can check all the rules for us::
+ *
+ * cookie = dma_fence_begin_signalling();
+ * lock(A);
+ * unlock(A);
+ * dma_fence_signal(B);
+ * dma_fence_end_signalling(cookie);
+ *
+ * For using dma_fence_begin_signalling() and dma_fence_end_signalling() to
+ * annotate critical sections the following rules need to be observed:
+ *
+ * * All code necessary to complete a &dma_fence must be annotated, from the
+ * point where a fence is accessible to other threads, to the point where
+ * dma_fence_signal() is called. Un-annotated code can contain deadlock issues,
+ * and due to the very strict rules and many corner cases it is infeasible to
+ * catch these just with review or normal stress testing.
+ *
+ * * &struct dma_resv deserves a special note, since the readers are only
+ * protected by rcu. This means the signalling critical section starts as soon
+ * as the new fences are installed, even before dma_resv_unlock() is called.
+ *
+ * * The only exception are fast paths and opportunistic signalling code, which
+ * calls dma_fence_signal() purely as an optimization, but is not required to
+ * guarantee completion of a &dma_fence. The usual example is a wait IOCTL
+ * which calls dma_fence_signal(), while the mandatory completion path goes
+ * through a hardware interrupt and possible job completion worker.
+ *
+ * * To aid composability of code, the annotations can be freely nested, as long
+ * as the overall locking hierarchy is consistent. The annotations also work
+ * both in interrupt and process context. Due to implementation details this
+ * requires that callers pass an opaque cookie from
+ * dma_fence_begin_signalling() to dma_fence_end_signalling().
+ *
+ * * Validation against the cross driver contract is implemented by priming
+ * lockdep with the relevant hierarchy at boot-up. This means even just
+ * testing with a single device is enough to validate a driver, at least as
+ * far as deadlocks with dma_fence_wait() against dma_fence_signal() are
+ * concerned.
+ */
+#ifdef CONFIG_LOCKDEP
+static struct lockdep_map dma_fence_lockdep_map = {
+ .name = "dma_fence_map"
+};
+
+/**
+ * dma_fence_begin_signalling - begin a critical DMA fence signalling section
+ *
+ * Drivers should use this to annotate the beginning of any code section
+ * required to eventually complete &dma_fence by calling dma_fence_signal().
+ *
+ * The end of these critical sections are annotated with
+ * dma_fence_end_signalling().
+ *
+ * Returns:
+ *
+ * Opaque cookie needed by the implementation, which needs to be passed to
+ * dma_fence_end_signalling().
+ */
+bool dma_fence_begin_signalling(void)
+{
+ /* explicitly nesting ... */
+ if (lock_is_held_type(&dma_fence_lockdep_map, 1))
+ return true;
+
+ /* rely on might_sleep check for soft/hardirq locks */
+ if (in_atomic())
+ return true;
+
+ /* ... and non-recursive readlock */
+ lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _RET_IP_);
+
+ return false;
+}
+EXPORT_SYMBOL(dma_fence_begin_signalling);
+
+/**
+ * dma_fence_end_signalling - end a critical DMA fence signalling section
+ * @cookie: opaque cookie from dma_fence_begin_signalling()
+ *
+ * Closes a critical section annotation opened by dma_fence_begin_signalling().
+ */
+void dma_fence_end_signalling(bool cookie)
+{
+ if (cookie)
+ return;
+
+ lock_release(&dma_fence_lockdep_map, _RET_IP_);
+}
+EXPORT_SYMBOL(dma_fence_end_signalling);
+
+void __dma_fence_might_wait(void)
+{
+ bool tmp;
+
+ tmp = lock_is_held_type(&dma_fence_lockdep_map, 1);
+ if (tmp)
+ lock_release(&dma_fence_lockdep_map, _THIS_IP_);
+ lock_map_acquire(&dma_fence_lockdep_map);
+ lock_map_release(&dma_fence_lockdep_map);
+ if (tmp)
+ lock_acquire(&dma_fence_lockdep_map, 0, 0, 1, 1, NULL, _THIS_IP_);
+}
+#endif
+
+
+/**
+ * dma_fence_signal_timestamp_locked - signal completion of a fence
+ * @fence: the fence to signal
+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time. Set the timestamp provided as the fence
+ * signal timestamp.
+ *
+ * Unlike dma_fence_signal_timestamp(), this function must be called with
+ * &dma_fence.lock held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_timestamp_locked(struct dma_fence *fence,
+ ktime_t timestamp)
+{
+ struct dma_fence_cb *cur, *tmp;
+ struct list_head cb_list;
+
+ lockdep_assert_held(fence->lock);
+
+ if (unlikely(test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+ &fence->flags)))
+ return -EINVAL;
+
+ /* Stash the cb_list before replacing it with the timestamp */
+ list_replace(&fence->cb_list, &cb_list);
+
+ fence->timestamp = timestamp;
+ set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
+ trace_dma_fence_signaled(fence);
+
+ list_for_each_entry_safe(cur, tmp, &cb_list, node) {
+ INIT_LIST_HEAD(&cur->node);
+ cur->func(fence, cur);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_fence_signal_timestamp_locked);
+
+/**
+ * dma_fence_signal_timestamp - signal completion of a fence
+ * @fence: the fence to signal
+ * @timestamp: fence signal timestamp in kernel's CLOCK_MONOTONIC time domain
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time. Set the timestamp provided as the fence
+ * signal timestamp.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_timestamp(struct dma_fence *fence, ktime_t timestamp)
+{
+ unsigned long flags;
+ int ret;
+
+ if (!fence)
+ return -EINVAL;
+
+ spin_lock_irqsave(fence->lock, flags);
+ ret = dma_fence_signal_timestamp_locked(fence, timestamp);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_signal_timestamp);
+
+/**
+ * dma_fence_signal_locked - signal completion of a fence
+ * @fence: the fence to signal
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Unlike dma_fence_signal(), this function must be called with &dma_fence.lock
+ * held.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal_locked(struct dma_fence *fence)
+{
+ return dma_fence_signal_timestamp_locked(fence, ktime_get());
+}
+EXPORT_SYMBOL(dma_fence_signal_locked);
+
+/**
+ * dma_fence_signal - signal completion of a fence
+ * @fence: the fence to signal
+ *
+ * Signal completion for software callbacks on a fence, this will unblock
+ * dma_fence_wait() calls and run all the callbacks added with
+ * dma_fence_add_callback(). Can be called multiple times, but since a fence
+ * can only go from the unsignaled to the signaled state and not back, it will
+ * only be effective the first time.
+ *
+ * Returns 0 on success and a negative error value when @fence has been
+ * signalled already.
+ */
+int dma_fence_signal(struct dma_fence *fence)
+{
+ unsigned long flags;
+ int ret;
+ bool tmp;
+
+ if (!fence)
+ return -EINVAL;
+
+ tmp = dma_fence_begin_signalling();
+
+ spin_lock_irqsave(fence->lock, flags);
+ ret = dma_fence_signal_timestamp_locked(fence, ktime_get());
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ dma_fence_end_signalling(tmp);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_signal);
+
+/**
+ * dma_fence_wait_timeout - sleep until the fence gets signaled
+ * or until timeout elapses
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
+ * remaining timeout in jiffies on success. Other error values may be
+ * returned on custom implementations.
+ *
+ * Performs a synchronous wait on this fence. It is assumed the caller
+ * directly or indirectly (buf-mgr between reservation and committing)
+ * holds a reference to the fence, otherwise the fence might be
+ * freed before return, resulting in undefined behavior.
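+ *
+ * A hedged usage sketch, waiting interruptibly for at most one second::
+ *
+ *     signed long ret;
+ *
+ *     ret = dma_fence_wait_timeout(fence, true, msecs_to_jiffies(1000));
+ *     if (ret < 0)
+ *         return ret;
+ *     if (ret == 0)
+ *         return -ETIMEDOUT;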
+ *
+ * See also dma_fence_wait() and dma_fence_wait_any_timeout().
+ */
+signed long
+dma_fence_wait_timeout(struct dma_fence *fence, bool intr, signed long timeout)
+{
+ signed long ret;
+
+ if (WARN_ON(timeout < 0))
+ return -EINVAL;
+
+ might_sleep();
+
+ __dma_fence_might_wait();
+
+ dma_fence_enable_sw_signaling(fence);
+
+ trace_dma_fence_wait_start(fence);
+ if (fence->ops->wait)
+ ret = fence->ops->wait(fence, intr, timeout);
+ else
+ ret = dma_fence_default_wait(fence, intr, timeout);
+ trace_dma_fence_wait_end(fence);
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_wait_timeout);
+
+/**
+ * dma_fence_release - default release function for fences
+ * @kref: &dma_fence.refcount
+ *
+ * This is the default release function for &dma_fence. Drivers shouldn't call
+ * this directly, but instead call dma_fence_put().
+ */
+void dma_fence_release(struct kref *kref)
+{
+ struct dma_fence *fence =
+ container_of(kref, struct dma_fence, refcount);
+
+ trace_dma_fence_destroy(fence);
+
+ if (WARN(!list_empty(&fence->cb_list) &&
+ !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags),
+ "Fence %s:%s:%llx:%llx released with pending signals!\n",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ fence->context, fence->seqno)) {
+ unsigned long flags;
+
+ /*
+ * Failed to signal before release, likely a refcounting issue.
+ *
+ * This should never happen, but if it does make sure that we
+ * don't leave chains dangling. We set the error flag first
+ * so that the callbacks know this signal is due to an error.
+ */
+ spin_lock_irqsave(fence->lock, flags);
+ fence->error = -EDEADLK;
+ dma_fence_signal_locked(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
+ }
+
+ if (fence->ops->release)
+ fence->ops->release(fence);
+ else
+ dma_fence_free(fence);
+}
+EXPORT_SYMBOL(dma_fence_release);
+
+/**
+ * dma_fence_free - default release function for &dma_fence.
+ * @fence: fence to release
+ *
+ * This is the default implementation for &dma_fence_ops.release. It calls
+ * kfree_rcu() on @fence.
+ */
+void dma_fence_free(struct dma_fence *fence)
+{
+ kfree_rcu(fence, rcu);
+}
+EXPORT_SYMBOL(dma_fence_free);
+
+static bool __dma_fence_enable_signaling(struct dma_fence *fence)
+{
+ bool was_set;
+
+ lockdep_assert_held(fence->lock);
+
+ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+ &fence->flags);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return false;
+
+ if (!was_set && fence->ops->enable_signaling) {
+ trace_dma_fence_enable_signal(fence);
+
+ if (!fence->ops->enable_signaling(fence)) {
+ dma_fence_signal_locked(fence);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * dma_fence_enable_sw_signaling - enable signaling on fence
+ * @fence: the fence to enable
+ *
+ * This will request sw signaling to be enabled, to make the fence
+ * complete as soon as possible. This calls &dma_fence_ops.enable_signaling
+ * internally.
+ */
+void dma_fence_enable_sw_signaling(struct dma_fence *fence)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(fence->lock, flags);
+ __dma_fence_enable_signaling(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
+}
+EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
+
+/**
+ * dma_fence_add_callback - add a callback to be called when the fence
+ * is signaled
+ * @fence: the fence to wait on
+ * @cb: the callback to register
+ * @func: the function to call
+ *
+ * Add a software callback to the fence. The caller should keep a reference to
+ * the fence.
+ *
+ * @cb will be initialized by dma_fence_add_callback(), no initialization
+ * by the caller is required. Any number of callbacks can be registered
+ * to a fence, but a callback can only be registered to one fence at a time.
+ *
+ * If fence is already signaled, this function will return -ENOENT (and
+ * *not* call the callback).
+ *
+ * Note that the callback can be called from an atomic context or irq context.
+ *
+ * Returns 0 in case of success, -ENOENT if the fence is already signaled
+ * and -EINVAL in case of error.
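+ *
+ * A hedged sketch of bridging a fence to an on-stack completion (struct and
+ * function names are illustrative only); the -ENOENT return is treated the
+ * same as a signal::
+ *
+ *     struct my_waiter {
+ *         struct dma_fence_cb cb;
+ *         struct completion done;
+ *     };
+ *
+ *     static void my_fence_cb(struct dma_fence *f, struct dma_fence_cb *cb)
+ *     {
+ *         struct my_waiter *w = container_of(cb, struct my_waiter, cb);
+ *
+ *         complete(&w->done);
+ *     }
+ *
+ *     struct my_waiter w;
+ *
+ *     init_completion(&w.done);
+ *     if (dma_fence_add_callback(fence, &w.cb, my_fence_cb))
+ *         complete(&w.done);
+ *     wait_for_completion(&w.done);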
+ */
+int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+ dma_fence_func_t func)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ if (WARN_ON(!fence || !func))
+ return -EINVAL;
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ INIT_LIST_HEAD(&cb->node);
+ return -ENOENT;
+ }
+
+ spin_lock_irqsave(fence->lock, flags);
+
+ if (__dma_fence_enable_signaling(fence)) {
+ cb->func = func;
+ list_add_tail(&cb->node, &fence->cb_list);
+ } else {
+ INIT_LIST_HEAD(&cb->node);
+ ret = -ENOENT;
+ }
+
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_add_callback);
+
+/**
+ * dma_fence_get_status - returns the status upon completion
+ * @fence: the dma_fence to query
+ *
+ * This wraps dma_fence_get_status_locked() to return the error status
+ * condition on a signaled fence. See dma_fence_get_status_locked() for more
+ * details.
+ *
+ * Returns 0 if the fence has not yet been signaled, 1 if the fence has
+ * been signaled without an error condition, or a negative error code
+ * if the fence has been completed in err.
+ */
+int dma_fence_get_status(struct dma_fence *fence)
+{
+ unsigned long flags;
+ int status;
+
+ spin_lock_irqsave(fence->lock, flags);
+ status = dma_fence_get_status_locked(fence);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return status;
+}
+EXPORT_SYMBOL(dma_fence_get_status);
+
+/**
+ * dma_fence_remove_callback - remove a callback from the signaling list
+ * @fence: the fence to wait on
+ * @cb: the callback to remove
+ *
+ * Remove a previously queued callback from the fence. This function returns
+ * true if the callback is successfully removed, or false if the fence has
+ * already been signaled.
+ *
+ * *WARNING*:
+ * Cancelling a callback should only be done if you really know what you're
+ * doing, since deadlocks and race conditions could occur all too easily. For
+ * this reason, it should only ever be done on hardware lockup recovery,
+ * with a reference held to the fence.
+ *
+ * Behaviour is undefined if @cb has not been added to @fence using
+ * dma_fence_add_callback() beforehand.
+ */
+bool
+dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ unsigned long flags;
+ bool ret;
+
+ spin_lock_irqsave(fence->lock, flags);
+
+ ret = !list_empty(&cb->node);
+ if (ret)
+ list_del_init(&cb->node);
+
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_remove_callback);
+
+struct default_wait_cb {
+ struct dma_fence_cb base;
+ struct task_struct *task;
+};
+
+static void
+dma_fence_default_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
+{
+ struct default_wait_cb *wait =
+ container_of(cb, struct default_wait_cb, base);
+
+ wake_up_state(wait->task, TASK_NORMAL);
+}
+
+/**
+ * dma_fence_default_wait - default sleep until the fence gets signaled
+ * or until timeout elapses
+ * @fence: the fence to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ *
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or the
+ * remaining timeout in jiffies on success. If the timeout is zero, one is
+ * returned when the fence is already signaled, for consistency with other
+ * functions taking a jiffies timeout.
+ */
+signed long
+dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
+{
+ struct default_wait_cb cb;
+ unsigned long flags;
+ signed long ret = timeout ? timeout : 1;
+
+ spin_lock_irqsave(fence->lock, flags);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ goto out;
+
+ if (intr && signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto out;
+ }
+
+ if (!timeout) {
+ ret = 0;
+ goto out;
+ }
+
+ cb.base.func = dma_fence_default_wait_cb;
+ cb.task = current;
+ list_add(&cb.base.node, &fence->cb_list);
+
+ while (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && ret > 0) {
+ if (intr)
+ __set_current_state(TASK_INTERRUPTIBLE);
+ else
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ ret = schedule_timeout(ret);
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (ret > 0 && intr && signal_pending(current))
+ ret = -ERESTARTSYS;
+ }
+
+ if (!list_empty(&cb.base.node))
+ list_del(&cb.base.node);
+ __set_current_state(TASK_RUNNING);
+
+out:
+ spin_unlock_irqrestore(fence->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_default_wait);
+
+static bool
+dma_fence_test_signaled_any(struct dma_fence **fences, uint32_t count,
+ uint32_t *idx)
+{
+ int i;
+
+ for (i = 0; i < count; ++i) {
+ struct dma_fence *fence = fences[i];
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) {
+ if (idx)
+ *idx = i;
+ return true;
+ }
+ }
+ return false;
+}
+
+/**
+ * dma_fence_wait_any_timeout - sleep until any fence gets signaled
+ * or until timeout elapses
+ * @fences: array of fences to wait on
+ * @count: number of fences to wait on
+ * @intr: if true, do an interruptible wait
+ * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
+ * @idx: used to store the first signaled fence index, meaningful only on
+ * positive return
+ *
+ * Returns -EINVAL on custom fence wait implementation, -ERESTARTSYS if
+ * interrupted, 0 if the wait timed out, or the remaining timeout in jiffies
+ * on success.
+ *
+ * Synchronously waits for the first fence in the array to be signaled. The
+ * caller needs to hold a reference to all fences in the array, otherwise a
+ * fence might be freed before return, resulting in undefined behavior.
+ *
+ * See also dma_fence_wait() and dma_fence_wait_timeout().
+ */
+signed long
+dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t count,
+ bool intr, signed long timeout, uint32_t *idx)
+{
+ struct default_wait_cb *cb;
+ signed long ret = timeout;
+ unsigned i;
+
+ if (WARN_ON(!fences || !count || timeout < 0))
+ return -EINVAL;
+
+ if (timeout == 0) {
+ for (i = 0; i < count; ++i)
+ if (dma_fence_is_signaled(fences[i])) {
+ if (idx)
+ *idx = i;
+ return 1;
+ }
+
+ return 0;
+ }
+
+ cb = kcalloc(count, sizeof(struct default_wait_cb), GFP_KERNEL);
+ if (cb == NULL) {
+ ret = -ENOMEM;
+ goto err_free_cb;
+ }
+
+ for (i = 0; i < count; ++i) {
+ struct dma_fence *fence = fences[i];
+
+ cb[i].task = current;
+ if (dma_fence_add_callback(fence, &cb[i].base,
+ dma_fence_default_wait_cb)) {
+ /* This fence is already signaled */
+ if (idx)
+ *idx = i;
+ goto fence_rm_cb;
+ }
+ }
+
+ while (ret > 0) {
+ if (intr)
+ set_current_state(TASK_INTERRUPTIBLE);
+ else
+ set_current_state(TASK_UNINTERRUPTIBLE);
+
+ if (dma_fence_test_signaled_any(fences, count, idx))
+ break;
+
+ ret = schedule_timeout(ret);
+
+ if (ret > 0 && intr && signal_pending(current))
+ ret = -ERESTARTSYS;
+ }
+
+ __set_current_state(TASK_RUNNING);
+
+fence_rm_cb:
+ while (i-- > 0)
+ dma_fence_remove_callback(fences[i], &cb[i].base);
+
+err_free_cb:
+ kfree(cb);
+
+ return ret;
+}
+EXPORT_SYMBOL(dma_fence_wait_any_timeout);
+
+/**
+ * dma_fence_describe - Dump fence description into seq_file
+ * @fence: the fence to describe
+ * @seq: the seq_file to put the textual description into
+ *
+ * Dump a textual description of the fence and its state into the seq_file.
+ */
+void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq)
+{
+ seq_printf(seq, "%s %s seq %llu %ssignalled\n",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence), fence->seqno,
+ dma_fence_is_signaled(fence) ? "" : "un");
+}
+EXPORT_SYMBOL(dma_fence_describe);
+
+/**
+ * dma_fence_init - Initialize a custom fence.
+ * @fence: the fence to initialize
+ * @ops: the dma_fence_ops for operations on this fence
+ * @lock: the irqsafe spinlock to use for locking this fence
+ * @context: the execution context this fence is run on
+ * @seqno: a linear increasing sequence number for this context
+ *
+ * Initializes an allocated fence, the caller doesn't have to keep its
+ * refcount after committing with this fence, but it will need to hold a
+ * refcount again if &dma_fence_ops.enable_signaling gets called.
+ *
+ * context and seqno are used for easy comparison between fences, allowing
+ * to check which fence is later by simply using dma_fence_later().
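+ *
+ * A hedged sketch of a minimal driver-side fence (my_fence and my_fence_ops
+ * are illustrative, error handling is elided; the ops must provide at least
+ * get_driver_name and get_timeline_name)::
+ *
+ *     struct my_fence {
+ *         struct dma_fence base;
+ *         spinlock_t lock;
+ *     };
+ *
+ *     struct my_fence *f = kzalloc(sizeof(*f), GFP_KERNEL);
+ *
+ *     spin_lock_init(&f->lock);
+ *     dma_fence_init(&f->base, &my_fence_ops, &f->lock,
+ *                    dma_fence_context_alloc(1), 1);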
+ */
+void
+dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno)
+{
+ BUG_ON(!lock);
+ BUG_ON(!ops || !ops->get_driver_name || !ops->get_timeline_name);
+
+ kref_init(&fence->refcount);
+ fence->ops = ops;
+ INIT_LIST_HEAD(&fence->cb_list);
+ fence->lock = lock;
+ fence->context = context;
+ fence->seqno = seqno;
+ fence->flags = 0UL;
+ fence->error = 0;
+
+ trace_dma_fence_init(fence);
+}
+EXPORT_SYMBOL(dma_fence_init);
diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c
new file mode 100644
index 000000000..59d158873
--- /dev/null
+++ b/drivers/dma-buf/dma-heap.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Framework for userspace DMA-BUF allocations
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#include <linux/cdev.h>
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/err.h>
+#include <linux/xarray.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/nospec.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/dma-heap.h>
+#include <uapi/linux/dma-heap.h>
+
+#define DEVNAME "dma_heap"
+
+#define NUM_HEAP_MINORS 128
+
+/**
+ * struct dma_heap - represents a dmabuf heap in the system
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @priv: private per-subdriver data for this heap
+ * @heap_devt: heap device node
+ * @list: list head connecting to list of heaps
+ * @heap_cdev: heap char device
+ *
+ * Represents a heap of memory from which buffers can be made.
+ */
+struct dma_heap {
+ const char *name;
+ const struct dma_heap_ops *ops;
+ void *priv;
+ dev_t heap_devt;
+ struct list_head list;
+ struct cdev heap_cdev;
+};
+
+static LIST_HEAD(heap_list);
+static DEFINE_MUTEX(heap_list_lock);
+static dev_t dma_heap_devt;
+static struct class *dma_heap_class;
+static DEFINE_XARRAY_ALLOC(dma_heap_minors);
+
+static int dma_heap_buffer_alloc(struct dma_heap *heap, size_t len,
+ unsigned int fd_flags,
+ unsigned int heap_flags)
+{
+ struct dma_buf *dmabuf;
+ int fd;
+
+ /*
+ * Allocations from all heaps have to begin
+ * and end on page boundaries.
+ */
+ len = PAGE_ALIGN(len);
+ if (!len)
+ return -EINVAL;
+
+ dmabuf = heap->ops->allocate(heap, len, fd_flags, heap_flags);
+ if (IS_ERR(dmabuf))
+ return PTR_ERR(dmabuf);
+
+ fd = dma_buf_fd(dmabuf, fd_flags);
+ if (fd < 0) {
+ dma_buf_put(dmabuf);
+ /* just return, as put will call release and that will free */
+ }
+ return fd;
+}
+
+static int dma_heap_open(struct inode *inode, struct file *file)
+{
+ struct dma_heap *heap;
+
+ heap = xa_load(&dma_heap_minors, iminor(inode));
+ if (!heap) {
+ pr_err("dma_heap: minor %d unknown.\n", iminor(inode));
+ return -ENODEV;
+ }
+
+ /* instance data as context */
+ file->private_data = heap;
+ nonseekable_open(inode, file);
+
+ return 0;
+}
+
+static long dma_heap_ioctl_allocate(struct file *file, void *data)
+{
+ struct dma_heap_allocation_data *heap_allocation = data;
+ struct dma_heap *heap = file->private_data;
+ int fd;
+
+ if (heap_allocation->fd)
+ return -EINVAL;
+
+ if (heap_allocation->fd_flags & ~DMA_HEAP_VALID_FD_FLAGS)
+ return -EINVAL;
+
+ if (heap_allocation->heap_flags & ~DMA_HEAP_VALID_HEAP_FLAGS)
+ return -EINVAL;
+
+ fd = dma_heap_buffer_alloc(heap, heap_allocation->len,
+ heap_allocation->fd_flags,
+ heap_allocation->heap_flags);
+ if (fd < 0)
+ return fd;
+
+ heap_allocation->fd = fd;
+
+ return 0;
+}
+
+static unsigned int dma_heap_ioctl_cmds[] = {
+ DMA_HEAP_IOCTL_ALLOC,
+};
+
+static long dma_heap_ioctl(struct file *file, unsigned int ucmd,
+ unsigned long arg)
+{
+ char stack_kdata[128];
+ char *kdata = stack_kdata;
+ unsigned int kcmd;
+ unsigned int in_size, out_size, drv_size, ksize;
+ int nr = _IOC_NR(ucmd);
+ int ret = 0;
+
+ if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds))
+ return -EINVAL;
+
+ nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds));
+ /* Get the kernel ioctl cmd that matches */
+ kcmd = dma_heap_ioctl_cmds[nr];
+
+ /* Figure out the delta between user cmd size and kernel cmd size */
+ drv_size = _IOC_SIZE(kcmd);
+ out_size = _IOC_SIZE(ucmd);
+ in_size = out_size;
+ if ((ucmd & kcmd & IOC_IN) == 0)
+ in_size = 0;
+ if ((ucmd & kcmd & IOC_OUT) == 0)
+ out_size = 0;
+ ksize = max(max(in_size, out_size), drv_size);
+
+ /* If necessary, allocate buffer for ioctl argument */
+ if (ksize > sizeof(stack_kdata)) {
+ kdata = kmalloc(ksize, GFP_KERNEL);
+ if (!kdata)
+ return -ENOMEM;
+ }
+
+ if (copy_from_user(kdata, (void __user *)arg, in_size) != 0) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ /* zero out any difference between the kernel/user structure size */
+ if (ksize > in_size)
+ memset(kdata + in_size, 0, ksize - in_size);
+
+ switch (kcmd) {
+ case DMA_HEAP_IOCTL_ALLOC:
+ ret = dma_heap_ioctl_allocate(file, kdata);
+ break;
+ default:
+ ret = -ENOTTY;
+ goto err;
+ }
+
+ if (copy_to_user((void __user *)arg, kdata, out_size) != 0)
+ ret = -EFAULT;
+err:
+ if (kdata != stack_kdata)
+ kfree(kdata);
+ return ret;
+}
+
+static const struct file_operations dma_heap_fops = {
+ .owner = THIS_MODULE,
+ .open = dma_heap_open,
+ .unlocked_ioctl = dma_heap_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = dma_heap_ioctl,
+#endif
+};
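+
+/*
+ * Example (illustrative sketch, not part of this driver): the userspace
+ * side of the allocation path handled above. It assumes a heap named
+ * "system" has been registered; error handling is trimmed.
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <unistd.h>
+ *	#include <linux/dma-heap.h>
+ *
+ *	int alloc_dmabuf(unsigned long long len)
+ *	{
+ *		struct dma_heap_allocation_data data = {
+ *			.len = len,
+ *			.fd_flags = O_RDWR | O_CLOEXEC,
+ *		};
+ *		int heap_fd = open("/dev/dma_heap/system", O_RDONLY | O_CLOEXEC);
+ *
+ *		if (heap_fd < 0)
+ *			return -1;
+ *		if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) < 0) {
+ *			close(heap_fd);
+ *			return -1;
+ *		}
+ *		close(heap_fd);
+ *		return data.fd;		// the new dma-buf fd
+ *	}
+ */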
+
+/**
+ * dma_heap_get_drvdata() - get per-subdriver data for the heap
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-subdriver data for the heap.
+ */
+void *dma_heap_get_drvdata(struct dma_heap *heap)
+{
+ return heap->priv;
+}
+
+/**
+ * dma_heap_get_name() - get heap name
+ * @heap: DMA-Heap to retrieve the name of
+ *
+ * Returns:
+ * The char* for the heap name.
+ */
+const char *dma_heap_get_name(struct dma_heap *heap)
+{
+ return heap->name;
+}
+
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info)
+{
+ struct dma_heap *heap, *h, *err_ret;
+ struct device *dev_ret;
+ unsigned int minor;
+ int ret;
+
+ if (!exp_info->name || !strcmp(exp_info->name, "")) {
+ pr_err("dma_heap: Cannot add heap without a name\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!exp_info->ops || !exp_info->ops->allocate) {
+ pr_err("dma_heap: Cannot add heap with invalid ops struct\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+
+ heap->name = exp_info->name;
+ heap->ops = exp_info->ops;
+ heap->priv = exp_info->priv;
+
+ /* Find unused minor number */
+ ret = xa_alloc(&dma_heap_minors, &minor, heap,
+ XA_LIMIT(0, NUM_HEAP_MINORS - 1), GFP_KERNEL);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to get minor number for heap\n");
+ err_ret = ERR_PTR(ret);
+ goto err0;
+ }
+
+ /* Create device */
+ heap->heap_devt = MKDEV(MAJOR(dma_heap_devt), minor);
+
+ cdev_init(&heap->heap_cdev, &dma_heap_fops);
+ ret = cdev_add(&heap->heap_cdev, heap->heap_devt, 1);
+ if (ret < 0) {
+ pr_err("dma_heap: Unable to add char device\n");
+ err_ret = ERR_PTR(ret);
+ goto err1;
+ }
+
+ dev_ret = device_create(dma_heap_class,
+ NULL,
+ heap->heap_devt,
+ NULL,
+ heap->name);
+ if (IS_ERR(dev_ret)) {
+ pr_err("dma_heap: Unable to create device\n");
+ err_ret = ERR_CAST(dev_ret);
+ goto err2;
+ }
+
+ mutex_lock(&heap_list_lock);
+ /* check the name is unique */
+ list_for_each_entry(h, &heap_list, list) {
+ if (!strcmp(h->name, exp_info->name)) {
+ mutex_unlock(&heap_list_lock);
+ pr_err("dma_heap: Already registered heap named %s\n",
+ exp_info->name);
+ err_ret = ERR_PTR(-EINVAL);
+ goto err3;
+ }
+ }
+
+ /* Add heap to the list */
+ list_add(&heap->list, &heap_list);
+ mutex_unlock(&heap_list_lock);
+
+ return heap;
+
+err3:
+ device_destroy(dma_heap_class, heap->heap_devt);
+err2:
+ cdev_del(&heap->heap_cdev);
+err1:
+ xa_erase(&dma_heap_minors, minor);
+err0:
+ kfree(heap);
+ return err_ret;
+}
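+
+/*
+ * Example (illustrative sketch): minimal registration of a heap by an
+ * exporter. "my_heap_ops" and "my_heap_data" are hypothetical; the CMA
+ * and system heaps are the in-tree users of this interface.
+ *
+ *	static struct dma_heap *my_heap;
+ *
+ *	static int my_heap_create(void)
+ *	{
+ *		struct dma_heap_export_info exp_info = {
+ *			.name = "my_heap",
+ *			.ops = &my_heap_ops,	// must implement ->allocate()
+ *			.priv = &my_heap_data,	// see dma_heap_get_drvdata()
+ *		};
+ *
+ *		my_heap = dma_heap_add(&exp_info);
+ *		return PTR_ERR_OR_ZERO(my_heap);
+ *	}
+ */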
+
+static char *dma_heap_devnode(struct device *dev, umode_t *mode)
+{
+ return kasprintf(GFP_KERNEL, "dma_heap/%s", dev_name(dev));
+}
+
+static int dma_heap_init(void)
+{
+ int ret;
+
+ ret = alloc_chrdev_region(&dma_heap_devt, 0, NUM_HEAP_MINORS, DEVNAME);
+ if (ret)
+ return ret;
+
+ dma_heap_class = class_create(THIS_MODULE, DEVNAME);
+ if (IS_ERR(dma_heap_class)) {
+ unregister_chrdev_region(dma_heap_devt, NUM_HEAP_MINORS);
+ return PTR_ERR(dma_heap_class);
+ }
+ dma_heap_class->devnode = dma_heap_devnode;
+
+ return 0;
+}
+subsys_initcall(dma_heap_init);
diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c
new file mode 100644
index 000000000..e78ff9333
--- /dev/null
+++ b/drivers/dma-buf/dma-resv.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
+ *
+ * Based on bo.c which bears the following copyright notice,
+ * but is dual licensed:
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include <linux/dma-resv.h>
+#include <linux/dma-fence-array.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/mmu_notifier.h>
+#include <linux/seq_file.h>
+
+/**
+ * DOC: Reservation Object Overview
+ *
+ * The reservation object provides a mechanism to manage a container of
+ * dma_fence objects associated with a resource. A reservation object
+ * can have any number of fences attached to it. Each fence carries a usage
+ * parameter determining how the operation represented by the fence is using
+ * the resource. The RCU mechanism is used to protect read access to fences
+ * from locked write-side updates.
+ *
+ * See struct dma_resv for more details.
+ */
+
+DEFINE_WD_CLASS(reservation_ww_class);
+EXPORT_SYMBOL(reservation_ww_class);
+
+/* Mask for the lower fence pointer bits */
+#define DMA_RESV_LIST_MASK 0x3
+
+struct dma_resv_list {
+ struct rcu_head rcu;
+ u32 num_fences, max_fences;
+ struct dma_fence __rcu *table[];
+};
+
+/* Extract the fence and usage flags from an RCU protected entry in the list. */
+static void dma_resv_list_entry(struct dma_resv_list *list, unsigned int index,
+ struct dma_resv *resv, struct dma_fence **fence,
+ enum dma_resv_usage *usage)
+{
+ long tmp;
+
+ tmp = (long)rcu_dereference_check(list->table[index],
+ resv ? dma_resv_held(resv) : true);
+ *fence = (struct dma_fence *)(tmp & ~DMA_RESV_LIST_MASK);
+ if (usage)
+ *usage = tmp & DMA_RESV_LIST_MASK;
+}
+
+/* Set the fence and usage flags at the specific index in the list. */
+static void dma_resv_list_set(struct dma_resv_list *list,
+ unsigned int index,
+ struct dma_fence *fence,
+ enum dma_resv_usage usage)
+{
+ long tmp = ((long)fence) | usage;
+
+ RCU_INIT_POINTER(list->table[index], (struct dma_fence *)tmp);
+}
+
+/*
+ * Allocate a new dma_resv_list and make sure to correctly initialize
+ * max_fences.
+ */
+static struct dma_resv_list *dma_resv_list_alloc(unsigned int max_fences)
+{
+ struct dma_resv_list *list;
+
+ list = kmalloc(struct_size(list, table, max_fences), GFP_KERNEL);
+ if (!list)
+ return NULL;
+
+ list->max_fences = (ksize(list) - offsetof(typeof(*list), table)) /
+ sizeof(*list->table);
+
+ return list;
+}
+
+/* Free a dma_resv_list and make sure to drop all references. */
+static void dma_resv_list_free(struct dma_resv_list *list)
+{
+ unsigned int i;
+
+ if (!list)
+ return;
+
+ for (i = 0; i < list->num_fences; ++i) {
+ struct dma_fence *fence;
+
+ dma_resv_list_entry(list, i, NULL, &fence, NULL);
+ dma_fence_put(fence);
+ }
+ kfree_rcu(list, rcu);
+}
+
+/**
+ * dma_resv_init - initialize a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_init(struct dma_resv *obj)
+{
+ ww_mutex_init(&obj->lock, &reservation_ww_class);
+
+ RCU_INIT_POINTER(obj->fences, NULL);
+}
+EXPORT_SYMBOL(dma_resv_init);
+
+/**
+ * dma_resv_fini - destroys a reservation object
+ * @obj: the reservation object
+ */
+void dma_resv_fini(struct dma_resv *obj)
+{
+ /*
+ * This object should be dead and all references must have
+ * been released to it, so no need to be protected with rcu.
+ */
+ dma_resv_list_free(rcu_dereference_protected(obj->fences, true));
+ ww_mutex_destroy(&obj->lock);
+}
+EXPORT_SYMBOL(dma_resv_fini);
+
+/* Dereference the fences while ensuring RCU rules */
+static inline struct dma_resv_list *dma_resv_fences_list(struct dma_resv *obj)
+{
+ return rcu_dereference_check(obj->fences, dma_resv_held(obj));
+}
+
+/**
+ * dma_resv_reserve_fences - Reserve space to add fences to a dma_resv object.
+ * @obj: reservation object
+ * @num_fences: number of fences we want to add
+ *
+ * Should be called before dma_resv_add_fence(). Must be called with @obj
+ * locked through dma_resv_lock().
+ *
+ * Note that the preallocated slots need to be re-reserved if @obj is unlocked
+ * at any time before calling dma_resv_add_fence(). This is validated when
+ * CONFIG_DEBUG_MUTEXES is enabled.
+ *
+ * RETURNS
+ * Zero for success, or -errno
+ */
+int dma_resv_reserve_fences(struct dma_resv *obj, unsigned int num_fences)
+{
+ struct dma_resv_list *old, *new;
+ unsigned int i, j, k, max;
+
+ dma_resv_assert_held(obj);
+
+ old = dma_resv_fences_list(obj);
+ if (old && old->max_fences) {
+ if ((old->num_fences + num_fences) <= old->max_fences)
+ return 0;
+ max = max(old->num_fences + num_fences, old->max_fences * 2);
+ } else {
+ max = max(4ul, roundup_pow_of_two(num_fences));
+ }
+
+ new = dma_resv_list_alloc(max);
+ if (!new)
+ return -ENOMEM;
+
+ /*
+ * no need to bump fence refcounts, rcu_read access
+ * requires the use of kref_get_unless_zero, and the
+ * references from the old struct are carried over to
+ * the new.
+ */
+ for (i = 0, j = 0, k = max; i < (old ? old->num_fences : 0); ++i) {
+ enum dma_resv_usage usage;
+ struct dma_fence *fence;
+
+ dma_resv_list_entry(old, i, obj, &fence, &usage);
+ if (dma_fence_is_signaled(fence))
+ RCU_INIT_POINTER(new->table[--k], fence);
+ else
+ dma_resv_list_set(new, j++, fence, usage);
+ }
+ new->num_fences = j;
+
+ /*
+ * We are not changing the effective set of fences here so can
+ * merely update the pointer to the new array; both existing
+ * readers and new readers will see exactly the same set of
+ * active (unsignaled) fences. Individual fences and the
+ * old array are protected by RCU and so will not vanish under
+ * the gaze of the rcu_read_lock() readers.
+ */
+ rcu_assign_pointer(obj->fences, new);
+
+ if (!old)
+ return 0;
+
+ /* Drop the references to the signaled fences */
+ for (i = k; i < max; ++i) {
+ struct dma_fence *fence;
+
+ fence = rcu_dereference_protected(new->table[i],
+ dma_resv_held(obj));
+ dma_fence_put(fence);
+ }
+ kfree_rcu(old, rcu);
+
+ return 0;
+}
+EXPORT_SYMBOL(dma_resv_reserve_fences);
+
+#ifdef CONFIG_DEBUG_MUTEXES
+/**
+ * dma_resv_reset_max_fences - reset fences for debugging
+ * @obj: the dma_resv object to reset
+ *
+ * Reset the number of pre-reserved fence slots to test that drivers do
+ * correct slot allocation using dma_resv_reserve_fences(). See also
+ * &dma_resv_list.max_fences.
+ */
+void dma_resv_reset_max_fences(struct dma_resv *obj)
+{
+ struct dma_resv_list *fences = dma_resv_fences_list(obj);
+
+ dma_resv_assert_held(obj);
+
+ /* Test fence slot reservation */
+ if (fences)
+ fences->max_fences = fences->num_fences;
+}
+EXPORT_SYMBOL(dma_resv_reset_max_fences);
+#endif
+
+/**
+ * dma_resv_add_fence - Add a fence to the dma_resv obj
+ * @obj: the reservation object
+ * @fence: the fence to add
+ * @usage: how the fence is used, see enum dma_resv_usage
+ *
+ * Add a fence to a slot. @obj must be locked with dma_resv_lock(), and
+ * dma_resv_reserve_fences() must have been called.
+ *
+ * See also &dma_resv.fences for a discussion of the semantics.
+ */
+void dma_resv_add_fence(struct dma_resv *obj, struct dma_fence *fence,
+ enum dma_resv_usage usage)
+{
+ struct dma_resv_list *fobj;
+ struct dma_fence *old;
+ unsigned int i, count;
+
+ dma_fence_get(fence);
+
+ dma_resv_assert_held(obj);
+
+ /* Drivers should not add containers here, instead add each fence
+ * individually.
+ */
+ WARN_ON(dma_fence_is_container(fence));
+
+ fobj = dma_resv_fences_list(obj);
+ count = fobj->num_fences;
+
+ for (i = 0; i < count; ++i) {
+ enum dma_resv_usage old_usage;
+
+ dma_resv_list_entry(fobj, i, obj, &old, &old_usage);
+ if ((old->context == fence->context && old_usage >= usage &&
+ dma_fence_is_later_or_same(fence, old)) ||
+ dma_fence_is_signaled(old)) {
+ dma_resv_list_set(fobj, i, fence, usage);
+ dma_fence_put(old);
+ return;
+ }
+ }
+
+ BUG_ON(fobj->num_fences >= fobj->max_fences);
+ count++;
+
+ dma_resv_list_set(fobj, i, fence, usage);
+ /* pointer update must be visible before we extend the num_fences */
+ smp_store_mb(fobj->num_fences, count);
+}
+EXPORT_SYMBOL(dma_resv_add_fence);
+
+/**
+ * dma_resv_replace_fences - replace fences in the dma_resv obj
+ * @obj: the reservation object
+ * @context: the context of the fences to replace
+ * @replacement: the new fence to use instead
+ * @usage: how the new fence is used, see enum dma_resv_usage
+ *
+ * Replace fences with a specified context with a new fence. Only valid if the
+ * operation represented by the original fence no longer has access to the
+ * resources represented by the dma_resv object when the new fence completes.
+ *
+ * An example for using this is replacing a preemption fence with a page table
+ * update fence which makes the resource inaccessible.
+ */
+void dma_resv_replace_fences(struct dma_resv *obj, uint64_t context,
+ struct dma_fence *replacement,
+ enum dma_resv_usage usage)
+{
+ struct dma_resv_list *list;
+ unsigned int i;
+
+ dma_resv_assert_held(obj);
+
+ list = dma_resv_fences_list(obj);
+ for (i = 0; list && i < list->num_fences; ++i) {
+ struct dma_fence *old;
+
+ dma_resv_list_entry(list, i, obj, &old, NULL);
+ if (old->context != context)
+ continue;
+
+ dma_resv_list_set(list, i, dma_fence_get(replacement), usage);
+ dma_fence_put(old);
+ }
+}
+EXPORT_SYMBOL(dma_resv_replace_fences);
+
+/* Restart the unlocked iteration by initializing the cursor object. */
+static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
+{
+ cursor->index = 0;
+ cursor->num_fences = 0;
+ cursor->fences = dma_resv_fences_list(cursor->obj);
+ if (cursor->fences)
+ cursor->num_fences = cursor->fences->num_fences;
+ cursor->is_restarted = true;
+}
+
+/* Walk to the next not signaled fence and grab a reference to it */
+static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
+{
+ if (!cursor->fences)
+ return;
+
+ do {
+ /* Drop the reference from the previous round */
+ dma_fence_put(cursor->fence);
+
+ if (cursor->index >= cursor->num_fences) {
+ cursor->fence = NULL;
+ break;
+
+ }
+
+ dma_resv_list_entry(cursor->fences, cursor->index++,
+ cursor->obj, &cursor->fence,
+ &cursor->fence_usage);
+ cursor->fence = dma_fence_get_rcu(cursor->fence);
+ if (!cursor->fence) {
+ dma_resv_iter_restart_unlocked(cursor);
+ continue;
+ }
+
+ if (!dma_fence_is_signaled(cursor->fence) &&
+ cursor->usage >= cursor->fence_usage)
+ break;
+ } while (true);
+}
+
+/**
+ * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Subsequent fences are iterated with dma_resv_iter_next_unlocked().
+ *
+ * Beware that the iterator can be restarted. Code which accumulates statistics
+ * or similar needs to check for this with dma_resv_iter_is_restarted(). For
+ * this reason prefer the locked dma_resv_iter_first() whenever possible.
+ *
+ * Returns the first fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
+{
+ rcu_read_lock();
+ do {
+ dma_resv_iter_restart_unlocked(cursor);
+ dma_resv_iter_walk_unlocked(cursor);
+ } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
+ rcu_read_unlock();
+
+ return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_first_unlocked);
+
+/**
+ * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
+ * @cursor: the cursor with the current position
+ *
+ * Beware that the iterator can be restarted. Code which accumulates statistics
+ * or similar needs to check for this with dma_resv_iter_is_restarted(). For
+ * this reason prefer the locked dma_resv_iter_next() whenever possible.
+ *
+ * Returns the next fence from an unlocked dma_resv obj.
+ */
+struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
+{
+ bool restart;
+
+ rcu_read_lock();
+ cursor->is_restarted = false;
+ restart = dma_resv_fences_list(cursor->obj) != cursor->fences;
+ do {
+ if (restart)
+ dma_resv_iter_restart_unlocked(cursor);
+ dma_resv_iter_walk_unlocked(cursor);
+ restart = true;
+ } while (dma_resv_fences_list(cursor->obj) != cursor->fences);
+ rcu_read_unlock();
+
+ return cursor->fence;
+}
+EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
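+
+/*
+ * Example (sketch): walking the fences without holding the reservation
+ * lock. Any state accumulated over the walk must be thrown away whenever
+ * the iterator reports a restart.
+ *
+ *	struct dma_resv_iter cursor;
+ *	struct dma_fence *fence;
+ *	unsigned int count = 0;
+ *
+ *	dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_READ);
+ *	dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ *		if (dma_resv_iter_is_restarted(&cursor))
+ *			count = 0;	// drop partial results on restart
+ *		count++;
+ *	}
+ *	dma_resv_iter_end(&cursor);
+ */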
+
+/**
+ * dma_resv_iter_first - first fence from a locked dma_resv object
+ * @cursor: cursor to record the current position
+ *
+ * Subsequent fences are iterated with dma_resv_iter_next().
+ *
+ * Return the first fence in the dma_resv object while holding the
+ * &dma_resv.lock.
+ */
+struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
+{
+ struct dma_fence *fence;
+
+ dma_resv_assert_held(cursor->obj);
+
+ cursor->index = 0;
+ cursor->fences = dma_resv_fences_list(cursor->obj);
+
+ fence = dma_resv_iter_next(cursor);
+ cursor->is_restarted = true;
+ return fence;
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_first);
+
+/**
+ * dma_resv_iter_next - next fence from a locked dma_resv object
+ * @cursor: cursor to record the current position
+ *
+ * Return the next fence from the dma_resv object while holding the
+ * &dma_resv.lock.
+ */
+struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
+{
+ struct dma_fence *fence;
+
+ dma_resv_assert_held(cursor->obj);
+
+ cursor->is_restarted = false;
+
+ do {
+ if (!cursor->fences ||
+ cursor->index >= cursor->fences->num_fences)
+ return NULL;
+
+ dma_resv_list_entry(cursor->fences, cursor->index++,
+ cursor->obj, &fence, &cursor->fence_usage);
+ } while (cursor->fence_usage > cursor->usage);
+
+ return fence;
+}
+EXPORT_SYMBOL_GPL(dma_resv_iter_next);
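+
+/*
+ * Example (sketch, error handling trimmed): the locked variant needs no
+ * restart handling since the fence list cannot change under the lock.
+ *
+ *	struct dma_resv_iter cursor;
+ *	struct dma_fence *fence;
+ *
+ *	dma_resv_lock(resv, NULL);
+ *	dma_resv_for_each_fence(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP, fence)
+ *		pr_info("fence %llu\n", fence->seqno);
+ *	dma_resv_unlock(resv);
+ */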
+
+/**
+ * dma_resv_copy_fences - Copy all fences from src to dst.
+ * @dst: the destination reservation object
+ * @src: the source reservation object
+ *
+ * Copy all fences from src to dst. dst-lock must be held.
+ */
+int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
+{
+ struct dma_resv_iter cursor;
+ struct dma_resv_list *list;
+ struct dma_fence *f;
+
+ dma_resv_assert_held(dst);
+
+ list = NULL;
+
+ dma_resv_iter_begin(&cursor, src, DMA_RESV_USAGE_BOOKKEEP);
+ dma_resv_for_each_fence_unlocked(&cursor, f) {
+
+ if (dma_resv_iter_is_restarted(&cursor)) {
+ dma_resv_list_free(list);
+
+ list = dma_resv_list_alloc(cursor.num_fences);
+ if (!list) {
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
+ }
+ list->num_fences = 0;
+ }
+
+ dma_fence_get(f);
+ dma_resv_list_set(list, list->num_fences++, f,
+ dma_resv_iter_usage(&cursor));
+ }
+ dma_resv_iter_end(&cursor);
+
+ list = rcu_replace_pointer(dst->fences, list, dma_resv_held(dst));
+ dma_resv_list_free(list);
+ return 0;
+}
+EXPORT_SYMBOL(dma_resv_copy_fences);
+
+/**
+ * dma_resv_get_fences - Get an object's fences without the update side lock held
+ * @obj: the reservation object
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ * @num_fences: the number of fences returned
+ * @fences: the array of fence ptrs returned (array is krealloc'd to the
+ * required size, and must be freed by caller)
+ *
+ * Retrieve all fences from the reservation object.
+ * Returns either zero or -ENOMEM.
+ */
+int dma_resv_get_fences(struct dma_resv *obj, enum dma_resv_usage usage,
+ unsigned int *num_fences, struct dma_fence ***fences)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ *num_fences = 0;
+ *fences = NULL;
+
+ dma_resv_iter_begin(&cursor, obj, usage);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+
+ if (dma_resv_iter_is_restarted(&cursor)) {
+ struct dma_fence **new_fences;
+ unsigned int count;
+
+ while (*num_fences)
+ dma_fence_put((*fences)[--(*num_fences)]);
+
+ count = cursor.num_fences + 1;
+
+ /* Eventually re-allocate the array */
+ new_fences = krealloc_array(*fences, count,
+ sizeof(void *),
+ GFP_KERNEL);
+ if (count && !new_fences) {
+ kfree(*fences);
+ *fences = NULL;
+ *num_fences = 0;
+ dma_resv_iter_end(&cursor);
+ return -ENOMEM;
+ }
+ *fences = new_fences;
+ }
+
+ (*fences)[(*num_fences)++] = dma_fence_get(fence);
+ }
+ dma_resv_iter_end(&cursor);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_resv_get_fences);
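+
+/*
+ * Example (sketch): a caller snapshotting the fences, then dropping the
+ * references and freeing the krealloc'ed array itself.
+ *
+ *	struct dma_fence **fences;
+ *	unsigned int i, count;
+ *	int ret;
+ *
+ *	ret = dma_resv_get_fences(resv, DMA_RESV_USAGE_WRITE, &count, &fences);
+ *	if (ret)
+ *		return ret;
+ *	for (i = 0; i < count; i++) {
+ *		// ... use fences[i] ...
+ *		dma_fence_put(fences[i]);
+ *	}
+ *	kfree(fences);
+ */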
+
+/**
+ * dma_resv_get_singleton - Get a single fence for all the fences
+ * @obj: the reservation object
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ * @fence: the resulting fence
+ *
+ * Get a single fence representing all the fences inside the resv object.
+ *
+ * Warning: This can't be used like this when adding the fence back to the resv
+ * object since that can lead to stack corruption when finalizing the
+ * dma_fence_array.
+ *
+ * Returns 0 on success and negative error values on failure.
+ */
+int dma_resv_get_singleton(struct dma_resv *obj, enum dma_resv_usage usage,
+ struct dma_fence **fence)
+{
+ struct dma_fence_array *array;
+ struct dma_fence **fences;
+ unsigned count;
+ int r;
+
+ r = dma_resv_get_fences(obj, usage, &count, &fences);
+ if (r)
+ return r;
+
+ if (count == 0) {
+ *fence = NULL;
+ return 0;
+ }
+
+ if (count == 1) {
+ *fence = fences[0];
+ kfree(fences);
+ return 0;
+ }
+
+ array = dma_fence_array_create(count, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!array) {
+ while (count--)
+ dma_fence_put(fences[count]);
+ kfree(fences);
+ return -ENOMEM;
+ }
+
+ *fence = &array->base;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(dma_resv_get_singleton);
+
+/**
+ * dma_resv_wait_timeout - Wait on reservation's objects fences
+ * @obj: the reservation object
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ * @intr: if true, do interruptible wait
+ * @timeout: timeout value in jiffies or zero to return immediately
+ *
+ * Callers are not required to hold specific locks, but may hold
+ * dma_resv_lock() already.
+ *
+ * RETURNS
+ * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
+ * greater than zero on success.
+ */
+long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage,
+ bool intr, unsigned long timeout)
+{
+ long ret = timeout ? timeout : 1;
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_iter_begin(&cursor, obj, usage);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+
+ ret = dma_fence_wait_timeout(fence, intr, ret);
+ if (ret <= 0) {
+ dma_resv_iter_end(&cursor);
+ return ret;
+ }
+ }
+ dma_resv_iter_end(&cursor);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
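+
+/*
+ * Example (sketch): waiting interruptibly for up to 100ms for all writers
+ * to complete.
+ *
+ *	long ret;
+ *
+ *	ret = dma_resv_wait_timeout(resv, DMA_RESV_USAGE_WRITE, true,
+ *				    msecs_to_jiffies(100));
+ *	if (ret == 0)
+ *		return -ETIMEDOUT;	// timed out
+ *	if (ret < 0)
+ *		return ret;		// e.g. -ERESTARTSYS
+ */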
+
+/**
+ * dma_resv_test_signaled - Test if a reservation object's fences have been
+ * signaled.
+ * @obj: the reservation object
+ * @usage: controls which fences to include, see enum dma_resv_usage.
+ *
+ * Callers are not required to hold specific locks, but may hold
+ * dma_resv_lock() already.
+ *
+ * RETURNS
+ *
+ * True if all fences are signaled, else false.
+ */
+bool dma_resv_test_signaled(struct dma_resv *obj, enum dma_resv_usage usage)
+{
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_iter_begin(&cursor, obj, usage);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ dma_resv_iter_end(&cursor);
+ return false;
+ }
+ dma_resv_iter_end(&cursor);
+ return true;
+}
+EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
+
+/**
+ * dma_resv_describe - Dump description of the resv object into seq_file
+ * @obj: the reservation object
+ * @seq: the seq_file to dump the description into
+ *
+ * Dump a textual description of the fences inside a dma_resv object into the
+ * seq_file.
+ */
+void dma_resv_describe(struct dma_resv *obj, struct seq_file *seq)
+{
+ static const char *usage[] = { "kernel", "write", "read", "bookkeep" };
+ struct dma_resv_iter cursor;
+ struct dma_fence *fence;
+
+ dma_resv_for_each_fence(&cursor, obj, DMA_RESV_USAGE_READ, fence) {
+ seq_printf(seq, "\t%s fence:",
+ usage[dma_resv_iter_usage(&cursor)]);
+ dma_fence_describe(fence, seq);
+ }
+}
+EXPORT_SYMBOL_GPL(dma_resv_describe);
+
+#if IS_ENABLED(CONFIG_LOCKDEP)
+static int __init dma_resv_lockdep(void)
+{
+ struct mm_struct *mm = mm_alloc();
+ struct ww_acquire_ctx ctx;
+ struct dma_resv obj;
+ struct address_space mapping;
+ int ret;
+
+ if (!mm)
+ return -ENOMEM;
+
+ dma_resv_init(&obj);
+ address_space_init_once(&mapping);
+
+ mmap_read_lock(mm);
+ ww_acquire_init(&ctx, &reservation_ww_class);
+ ret = dma_resv_lock(&obj, &ctx);
+ if (ret == -EDEADLK)
+ dma_resv_lock_slow(&obj, &ctx);
+ fs_reclaim_acquire(GFP_KERNEL);
+ /* for unmap_mapping_range on trylocked buffer objects in shrinkers */
+ i_mmap_lock_write(&mapping);
+ i_mmap_unlock_write(&mapping);
+#ifdef CONFIG_MMU_NOTIFIER
+ lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
+ __dma_fence_might_wait();
+ lock_map_release(&__mmu_notifier_invalidate_range_start_map);
+#else
+ __dma_fence_might_wait();
+#endif
+ fs_reclaim_release(GFP_KERNEL);
+ ww_mutex_unlock(&obj.lock);
+ ww_acquire_fini(&ctx);
+ mmap_read_unlock(mm);
+
+ mmput(mm);
+
+ return 0;
+}
+subsys_initcall(dma_resv_lockdep);
+#endif
diff --git a/drivers/dma-buf/heaps/Kconfig b/drivers/dma-buf/heaps/Kconfig
new file mode 100644
index 000000000..a5eef06c4
--- /dev/null
+++ b/drivers/dma-buf/heaps/Kconfig
@@ -0,0 +1,14 @@
+config DMABUF_HEAPS_SYSTEM
+ bool "DMA-BUF System Heap"
+ depends on DMABUF_HEAPS
+ help
+ Choose this option to enable the system dmabuf heap. The system heap
+ is backed by pages from the buddy allocator. If in doubt, say Y.
+
+config DMABUF_HEAPS_CMA
+ bool "DMA-BUF CMA Heap"
+ depends on DMABUF_HEAPS && DMA_CMA
+ help
+ Choose this option to enable dma-buf CMA heap. This heap is backed
+ by the Contiguous Memory Allocator (CMA). If your system has these
+ regions, you should say Y here.
diff --git a/drivers/dma-buf/heaps/Makefile b/drivers/dma-buf/heaps/Makefile
new file mode 100644
index 000000000..974467791
--- /dev/null
+++ b/drivers/dma-buf/heaps/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_DMABUF_HEAPS_SYSTEM) += system_heap.o
+obj-$(CONFIG_DMABUF_HEAPS_CMA) += cma_heap.o
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
new file mode 100644
index 000000000..28fb04ecc
--- /dev/null
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -0,0 +1,407 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF CMA heap exporter
+ *
+ * Copyright (C) 2012, 2019, 2020 Linaro Ltd.
+ * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
+ *
+ * Also utilizing parts of Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ */
+#include <linux/cma.h>
+#include <linux/dma-buf.h>
+#include <linux/dma-heap.h>
+#include <linux/dma-map-ops.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/io.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+
+struct cma_heap {
+ struct dma_heap *heap;
+ struct cma *cma;
+};
+
+struct cma_heap_buffer {
+ struct cma_heap *heap;
+ struct list_head attachments;
+ struct mutex lock;
+ unsigned long len;
+ struct page *cma_pages;
+ struct page **pages;
+ pgoff_t pagecount;
+ int vmap_cnt;
+ void *vaddr;
+};
+
+struct dma_heap_attachment {
+ struct device *dev;
+ struct sg_table table;
+ struct list_head list;
+ bool mapped;
+};
+
+static int cma_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+ int ret;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ ret = sg_alloc_table_from_pages(&a->table, buffer->pages,
+ buffer->pagecount, 0,
+ buffer->pagecount << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret) {
+ kfree(a);
+ return ret;
+ }
+
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+ a->mapped = false;
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void cma_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(&a->table);
+ kfree(a);
+}
+
+static struct sg_table *cma_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+ struct sg_table *table = &a->table;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ if (ret)
+ return ERR_PTR(-ENOMEM);
+ a->mapped = true;
+ return table;
+}
+
+static void cma_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+
+ a->mapped = false;
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
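+
+/*
+ * Example (sketch, importer side): how a device driver typically consumes
+ * a buffer exported by this heap through the generic dma-buf API, which
+ * ends up in the attach/map callbacks above. "dmabuf" and "dev" are
+ * assumed to come from the caller; error paths are abbreviated.
+ *
+ *	struct dma_buf_attachment *attach;
+ *	struct sg_table *sgt;
+ *
+ *	attach = dma_buf_attach(dmabuf, dev);
+ *	if (IS_ERR(attach))
+ *		return PTR_ERR(attach);
+ *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ *	if (IS_ERR(sgt)) {
+ *		dma_buf_detach(dmabuf, attach);
+ *		return PTR_ERR(sgt);
+ *	}
+ *	// ... program the device with the entries in sgt ...
+ *	dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
+ *	dma_buf_detach(dmabuf, attach);
+ */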
+
+static int cma_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_cpu(a->dev, &a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int cma_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_device(a->dev, &a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static vm_fault_t cma_heap_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct cma_heap_buffer *buffer = vma->vm_private_data;
+
+	if (vmf->pgoff >= buffer->pagecount)
+ return VM_FAULT_SIGBUS;
+
+ vmf->page = buffer->pages[vmf->pgoff];
+ get_page(vmf->page);
+
+ return 0;
+}
+
+static const struct vm_operations_struct dma_heap_vm_ops = {
+ .fault = cma_heap_vm_fault,
+};
+
+static int cma_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &dma_heap_vm_ops;
+ vma->vm_private_data = buffer;
+
+ return 0;
+}
+
+static void *cma_heap_do_vmap(struct cma_heap_buffer *buffer)
+{
+ void *vaddr;
+
+ vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static int cma_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+ int ret = 0;
+
+ mutex_lock(&buffer->lock);
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ iosys_map_set_vaddr(map, buffer->vaddr);
+ goto out;
+ }
+
+ vaddr = cma_heap_do_vmap(buffer);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto out;
+ }
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+ iosys_map_set_vaddr(map, buffer->vaddr);
+out:
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
+
+static void cma_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+ iosys_map_clear(map);
+}
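+
+/*
+ * Example (sketch): kernel CPU access through the vmap interface
+ * implemented above, assuming the buffer lives in system memory so that
+ * map.vaddr is valid.
+ *
+ *	struct iosys_map map;
+ *	int ret;
+ *
+ *	ret = dma_buf_vmap(dmabuf, &map);
+ *	if (ret)
+ *		return ret;
+ *	memset(map.vaddr, 0, dmabuf->size);	// CPU access via the mapping
+ *	dma_buf_vunmap(dmabuf, &map);
+ */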
+
+static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct cma_heap_buffer *buffer = dmabuf->priv;
+ struct cma_heap *cma_heap = buffer->heap;
+
+ if (buffer->vmap_cnt > 0) {
+ WARN(1, "%s: buffer still mapped in the kernel\n", __func__);
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+
+ /* free page list */
+ kfree(buffer->pages);
+ /* release memory */
+ cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
+ kfree(buffer);
+}
+
+static const struct dma_buf_ops cma_heap_buf_ops = {
+ .attach = cma_heap_attach,
+ .detach = cma_heap_detach,
+ .map_dma_buf = cma_heap_map_dma_buf,
+ .unmap_dma_buf = cma_heap_unmap_dma_buf,
+ .begin_cpu_access = cma_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = cma_heap_dma_buf_end_cpu_access,
+ .mmap = cma_heap_mmap,
+ .vmap = cma_heap_vmap,
+ .vunmap = cma_heap_vunmap,
+ .release = cma_heap_dma_buf_release,
+};
+
+static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct cma_heap *cma_heap = dma_heap_get_drvdata(heap);
+ struct cma_heap_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ size_t size = PAGE_ALIGN(len);
+ pgoff_t pagecount = size >> PAGE_SHIFT;
+ unsigned long align = get_order(size);
+ struct page *cma_pages;
+ struct dma_buf *dmabuf;
+ int ret = -ENOMEM;
+ pgoff_t pg;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->lock);
+ buffer->len = size;
+
+ if (align > CONFIG_CMA_ALIGNMENT)
+ align = CONFIG_CMA_ALIGNMENT;
+
+ cma_pages = cma_alloc(cma_heap->cma, pagecount, align, false);
+ if (!cma_pages)
+ goto free_buffer;
+
+ /* Clear the cma pages */
+ if (PageHighMem(cma_pages)) {
+ unsigned long nr_clear_pages = pagecount;
+ struct page *page = cma_pages;
+
+ while (nr_clear_pages > 0) {
+ void *vaddr = kmap_atomic(page);
+
+ memset(vaddr, 0, PAGE_SIZE);
+ kunmap_atomic(vaddr);
+ /*
+ * Avoid wasting time zeroing memory if the process
+			 * has been killed by SIGKILL
+ */
+ if (fatal_signal_pending(current))
+ goto free_cma;
+ page++;
+ nr_clear_pages--;
+ }
+ } else {
+ memset(page_address(cma_pages), 0, size);
+ }
+
+ buffer->pages = kmalloc_array(pagecount, sizeof(*buffer->pages), GFP_KERNEL);
+ if (!buffer->pages) {
+ ret = -ENOMEM;
+ goto free_cma;
+ }
+
+ for (pg = 0; pg < pagecount; pg++)
+ buffer->pages[pg] = &cma_pages[pg];
+
+ buffer->cma_pages = cma_pages;
+ buffer->heap = cma_heap;
+ buffer->pagecount = pagecount;
+
+ /* create the dmabuf */
+ exp_info.exp_name = dma_heap_get_name(heap);
+ exp_info.ops = &cma_heap_buf_ops;
+ exp_info.size = buffer->len;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto free_pages;
+ }
+ return dmabuf;
+
+free_pages:
+ kfree(buffer->pages);
+free_cma:
+ cma_release(cma_heap->cma, cma_pages, pagecount);
+free_buffer:
+ kfree(buffer);
+
+ return ERR_PTR(ret);
+}
+
+static const struct dma_heap_ops cma_heap_ops = {
+ .allocate = cma_heap_allocate,
+};
+
+static int __add_cma_heap(struct cma *cma, void *data)
+{
+ struct cma_heap *cma_heap;
+ struct dma_heap_export_info exp_info;
+
+ cma_heap = kzalloc(sizeof(*cma_heap), GFP_KERNEL);
+ if (!cma_heap)
+ return -ENOMEM;
+ cma_heap->cma = cma;
+
+ exp_info.name = cma_get_name(cma);
+ exp_info.ops = &cma_heap_ops;
+ exp_info.priv = cma_heap;
+
+ cma_heap->heap = dma_heap_add(&exp_info);
+ if (IS_ERR(cma_heap->heap)) {
+ int ret = PTR_ERR(cma_heap->heap);
+
+ kfree(cma_heap);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int add_default_cma_heap(void)
+{
+ struct cma *default_cma = dev_get_cma_area(NULL);
+ int ret = 0;
+
+ if (default_cma)
+ ret = __add_cma_heap(default_cma, NULL);
+
+ return ret;
+}
+module_init(add_default_cma_heap);
+MODULE_DESCRIPTION("DMA-BUF CMA Heap");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/heaps/system_heap.c b/drivers/dma-buf/heaps/system_heap.c
new file mode 100644
index 000000000..fcf836ba9
--- /dev/null
+++ b/drivers/dma-buf/heaps/system_heap.c
@@ -0,0 +1,440 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * DMABUF System heap exporter
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019, 2020 Linaro Ltd.
+ *
+ * Portions based off of Andrew Davis' SRAM heap:
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Andrew F. Davis <afd@ti.com>
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/dma-mapping.h>
+#include <linux/dma-heap.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+static struct dma_heap *sys_heap;
+
+struct system_heap_buffer {
+ struct dma_heap *heap;
+ struct list_head attachments;
+ struct mutex lock;
+ unsigned long len;
+ struct sg_table sg_table;
+ int vmap_cnt;
+ void *vaddr;
+};
+
+struct dma_heap_attachment {
+ struct device *dev;
+ struct sg_table *table;
+ struct list_head list;
+ bool mapped;
+};
+
+#define LOW_ORDER_GFP (GFP_HIGHUSER | __GFP_ZERO | __GFP_COMP)
+#define MID_ORDER_GFP (LOW_ORDER_GFP | __GFP_NOWARN)
+#define HIGH_ORDER_GFP (((GFP_HIGHUSER | __GFP_ZERO | __GFP_NOWARN \
+ | __GFP_NORETRY) & ~__GFP_RECLAIM) \
+ | __GFP_COMP)
+static gfp_t order_flags[] = {HIGH_ORDER_GFP, MID_ORDER_GFP, LOW_ORDER_GFP};
+/*
+ * The selection of the orders used for allocation (1MB, 64K, 4K) is designed
+ * to match with the sizes often found in IOMMUs. Using order 4 pages instead
+ * of order 0 pages can significantly improve the performance of many IOMMUs
+ * by reducing TLB pressure and time spent updating page tables.
+ */
+static const unsigned int orders[] = {8, 4, 0};
+#define NUM_ORDERS ARRAY_SIZE(orders)
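+
+/*
+ * For example, a 1.25MB request (with 4K pages) is typically satisfied by
+ * one order-8 block (1MB) plus four order-4 blocks (64K each) rather than
+ * 320 order-0 pages, assuming the larger orders are still available.
+ */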
+
+static struct sg_table *dup_sg_table(struct sg_table *table)
+{
+ struct sg_table *new_table;
+ int ret, i;
+ struct scatterlist *sg, *new_sg;
+
+ new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
+ if (!new_table)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sg_alloc_table(new_table, table->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(new_table);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ new_sg = new_table->sgl;
+ for_each_sgtable_sg(table, sg, i) {
+ sg_set_page(new_sg, sg_page(sg), sg->length, sg->offset);
+ new_sg = sg_next(new_sg);
+ }
+
+ return new_table;
+}
+
+static int system_heap_attach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+ struct sg_table *table;
+
+ a = kzalloc(sizeof(*a), GFP_KERNEL);
+ if (!a)
+ return -ENOMEM;
+
+ table = dup_sg_table(&buffer->sg_table);
+ if (IS_ERR(table)) {
+ kfree(a);
+ return -ENOMEM;
+ }
+
+ a->table = table;
+ a->dev = attachment->dev;
+ INIT_LIST_HEAD(&a->list);
+ a->mapped = false;
+
+ attachment->priv = a;
+
+ mutex_lock(&buffer->lock);
+ list_add(&a->list, &buffer->attachments);
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static void system_heap_detach(struct dma_buf *dmabuf,
+ struct dma_buf_attachment *attachment)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a = attachment->priv;
+
+ mutex_lock(&buffer->lock);
+ list_del(&a->list);
+ mutex_unlock(&buffer->lock);
+
+ sg_free_table(a->table);
+ kfree(a->table);
+ kfree(a);
+}
+
+static struct sg_table *system_heap_map_dma_buf(struct dma_buf_attachment *attachment,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+ struct sg_table *table = a->table;
+ int ret;
+
+ ret = dma_map_sgtable(attachment->dev, table, direction, 0);
+ if (ret)
+ return ERR_PTR(ret);
+
+ a->mapped = true;
+ return table;
+}
+
+static void system_heap_unmap_dma_buf(struct dma_buf_attachment *attachment,
+ struct sg_table *table,
+ enum dma_data_direction direction)
+{
+ struct dma_heap_attachment *a = attachment->priv;
+
+ a->mapped = false;
+ dma_unmap_sgtable(attachment->dev, table, direction, 0);
+}
+
+static int system_heap_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ invalidate_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_cpu(a->dev, a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int system_heap_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
+ enum dma_data_direction direction)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct dma_heap_attachment *a;
+
+ mutex_lock(&buffer->lock);
+
+ if (buffer->vmap_cnt)
+ flush_kernel_vmap_range(buffer->vaddr, buffer->len);
+
+ list_for_each_entry(a, &buffer->attachments, list) {
+ if (!a->mapped)
+ continue;
+ dma_sync_sgtable_for_device(a->dev, a->table, direction);
+ }
+ mutex_unlock(&buffer->lock);
+
+ return 0;
+}
+
+static int system_heap_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct sg_table *table = &buffer->sg_table;
+ unsigned long addr = vma->vm_start;
+ struct sg_page_iter piter;
+ int ret;
+
+ for_each_sgtable_page(table, &piter, vma->vm_pgoff) {
+ struct page *page = sg_page_iter_page(&piter);
+
+ ret = remap_pfn_range(vma, addr, page_to_pfn(page), PAGE_SIZE,
+ vma->vm_page_prot);
+ if (ret)
+ return ret;
+ addr += PAGE_SIZE;
+ if (addr >= vma->vm_end)
+ return 0;
+ }
+ return 0;
+}
+
+static void *system_heap_do_vmap(struct system_heap_buffer *buffer)
+{
+ struct sg_table *table = &buffer->sg_table;
+ int npages = PAGE_ALIGN(buffer->len) / PAGE_SIZE;
+ struct page **pages = vmalloc(sizeof(struct page *) * npages);
+ struct page **tmp = pages;
+ struct sg_page_iter piter;
+ void *vaddr;
+
+ if (!pages)
+ return ERR_PTR(-ENOMEM);
+
+ for_each_sgtable_page(table, &piter, 0) {
+ WARN_ON(tmp - pages >= npages);
+ *tmp++ = sg_page_iter_page(&piter);
+ }
+
+ vaddr = vmap(pages, npages, VM_MAP, PAGE_KERNEL);
+ vfree(pages);
+
+ if (!vaddr)
+ return ERR_PTR(-ENOMEM);
+
+ return vaddr;
+}
+
+static int system_heap_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ void *vaddr;
+ int ret = 0;
+
+ mutex_lock(&buffer->lock);
+ if (buffer->vmap_cnt) {
+ buffer->vmap_cnt++;
+ iosys_map_set_vaddr(map, buffer->vaddr);
+ goto out;
+ }
+
+ vaddr = system_heap_do_vmap(buffer);
+ if (IS_ERR(vaddr)) {
+ ret = PTR_ERR(vaddr);
+ goto out;
+ }
+
+ buffer->vaddr = vaddr;
+ buffer->vmap_cnt++;
+ iosys_map_set_vaddr(map, buffer->vaddr);
+out:
+ mutex_unlock(&buffer->lock);
+
+ return ret;
+}
+
+static void system_heap_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+
+ mutex_lock(&buffer->lock);
+ if (!--buffer->vmap_cnt) {
+ vunmap(buffer->vaddr);
+ buffer->vaddr = NULL;
+ }
+ mutex_unlock(&buffer->lock);
+ iosys_map_clear(map);
+}
+
+static void system_heap_dma_buf_release(struct dma_buf *dmabuf)
+{
+ struct system_heap_buffer *buffer = dmabuf->priv;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ int i;
+
+ table = &buffer->sg_table;
+ for_each_sgtable_sg(table, sg, i) {
+ struct page *page = sg_page(sg);
+
+ __free_pages(page, compound_order(page));
+ }
+ sg_free_table(table);
+ kfree(buffer);
+}
+
+static const struct dma_buf_ops system_heap_buf_ops = {
+ .attach = system_heap_attach,
+ .detach = system_heap_detach,
+ .map_dma_buf = system_heap_map_dma_buf,
+ .unmap_dma_buf = system_heap_unmap_dma_buf,
+ .begin_cpu_access = system_heap_dma_buf_begin_cpu_access,
+ .end_cpu_access = system_heap_dma_buf_end_cpu_access,
+ .mmap = system_heap_mmap,
+ .vmap = system_heap_vmap,
+ .vunmap = system_heap_vunmap,
+ .release = system_heap_dma_buf_release,
+};
+
+static struct page *alloc_largest_available(unsigned long size,
+ unsigned int max_order)
+{
+ struct page *page;
+ int i;
+
+ for (i = 0; i < NUM_ORDERS; i++) {
+ if (size < (PAGE_SIZE << orders[i]))
+ continue;
+ if (max_order < orders[i])
+ continue;
+
+ page = alloc_pages(order_flags[i], orders[i]);
+ if (!page)
+ continue;
+ return page;
+ }
+ return NULL;
+}
+
+static struct dma_buf *system_heap_allocate(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags)
+{
+ struct system_heap_buffer *buffer;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ unsigned long size_remaining = len;
+ unsigned int max_order = orders[0];
+ struct dma_buf *dmabuf;
+ struct sg_table *table;
+ struct scatterlist *sg;
+ struct list_head pages;
+ struct page *page, *tmp_page;
+ int i, ret = -ENOMEM;
+
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&buffer->attachments);
+ mutex_init(&buffer->lock);
+ buffer->heap = heap;
+ buffer->len = len;
+
+ INIT_LIST_HEAD(&pages);
+ i = 0;
+ while (size_remaining > 0) {
+ /*
+ * Avoid trying to allocate memory if the process
+ * has been killed by SIGKILL
+ */
+ if (fatal_signal_pending(current)) {
+ ret = -EINTR;
+ goto free_buffer;
+ }
+
+ page = alloc_largest_available(size_remaining, max_order);
+ if (!page)
+ goto free_buffer;
+
+ list_add_tail(&page->lru, &pages);
+ size_remaining -= page_size(page);
+ max_order = compound_order(page);
+ i++;
+ }
+
+ table = &buffer->sg_table;
+ if (sg_alloc_table(table, i, GFP_KERNEL))
+ goto free_buffer;
+
+ sg = table->sgl;
+ list_for_each_entry_safe(page, tmp_page, &pages, lru) {
+ sg_set_page(sg, page, page_size(page), 0);
+ sg = sg_next(sg);
+ list_del(&page->lru);
+ }
+
+ /* create the dmabuf */
+ exp_info.exp_name = dma_heap_get_name(heap);
+ exp_info.ops = &system_heap_buf_ops;
+ exp_info.size = buffer->len;
+ exp_info.flags = fd_flags;
+ exp_info.priv = buffer;
+ dmabuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dmabuf)) {
+ ret = PTR_ERR(dmabuf);
+ goto free_pages;
+ }
+ return dmabuf;
+
+free_pages:
+ for_each_sgtable_sg(table, sg, i) {
+ struct page *p = sg_page(sg);
+
+ __free_pages(p, compound_order(p));
+ }
+ sg_free_table(table);
+free_buffer:
+ list_for_each_entry_safe(page, tmp_page, &pages, lru)
+ __free_pages(page, compound_order(page));
+ kfree(buffer);
+
+ return ERR_PTR(ret);
+}
+
+static const struct dma_heap_ops system_heap_ops = {
+ .allocate = system_heap_allocate,
+};
+
+static int system_heap_create(void)
+{
+ struct dma_heap_export_info exp_info;
+
+ exp_info.name = "system";
+ exp_info.ops = &system_heap_ops;
+ exp_info.priv = NULL;
+
+ sys_heap = dma_heap_add(&exp_info);
+ if (IS_ERR(sys_heap))
+ return PTR_ERR(sys_heap);
+
+ return 0;
+}
+module_init(system_heap_create);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma-buf/selftest.c b/drivers/dma-buf/selftest.c
new file mode 100644
index 000000000..c60b6944b
--- /dev/null
+++ b/drivers/dma-buf/selftest.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/compiler.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+
+#include "selftest.h"
+
+enum {
+#define selftest(n, func) __idx_##n,
+#include "selftests.h"
+#undef selftest
+};
+
+#define selftest(n, f) [__idx_##n] = { .name = #n, .func = f },
+static struct selftest {
+ bool enabled;
+ const char *name;
+ int (*func)(void);
+} selftests[] = {
+#include "selftests.h"
+};
+#undef selftest
+
+/* Embed the line number into the parameter name so that we can order tests */
+#define param(n) __PASTE(igt__, __PASTE(__PASTE(__LINE__, __), n))
+#define selftest_0(n, func, id) \
+module_param_named(id, selftests[__idx_##n].enabled, bool, 0400);
+#define selftest(n, func) selftest_0(n, func, param(n))
+#include "selftests.h"
+#undef selftest
+
+int __sanitycheck__(void)
+{
+ pr_debug("Hello World!\n");
+ return 0;
+}
+
+static char *__st_filter;
+
+static bool apply_subtest_filter(const char *caller, const char *name)
+{
+ char *filter, *sep, *tok;
+ bool result = true;
+
+ filter = kstrdup(__st_filter, GFP_KERNEL);
+ for (sep = filter; (tok = strsep(&sep, ","));) {
+ bool allow = true;
+ char *sl;
+
+ if (*tok == '!') {
+ allow = false;
+ tok++;
+ }
+
+ if (*tok == '\0')
+ continue;
+
+ sl = strchr(tok, '/');
+ if (sl) {
+ *sl++ = '\0';
+ if (strcmp(tok, caller)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+ tok = sl;
+ }
+
+ if (strcmp(tok, name)) {
+ if (allow)
+ result = false;
+ continue;
+ }
+
+ result = allow;
+ break;
+ }
+ kfree(filter);
+
+ return result;
+}
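+
+/*
+ * Example filter strings (sketch), passed through the st_filter module
+ * parameter declared at the bottom of this file:
+ *
+ *	st_filter=find_seqno			only subtests named "find_seqno"
+ *	st_filter=!find_gap			every subtest except "find_gap"
+ *	st_filter=dma_fence_chain/find_seqno	only that subtest of that group
+ */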
+
+int
+__subtests(const char *caller, const struct subtest *st, int count, void *data)
+{
+ int err;
+
+ for (; count--; st++) {
+ cond_resched();
+ if (signal_pending(current))
+ return -EINTR;
+
+ if (!apply_subtest_filter(caller, st->name))
+ continue;
+
+ pr_info("dma-buf: Running %s/%s\n", caller, st->name);
+
+ err = st->func(data);
+ if (err && err != -EINTR) {
+ pr_err("dma-buf/%s: %s failed with error %d\n",
+ caller, st->name, err);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static void set_default_test_all(struct selftest *st, unsigned long count)
+{
+ unsigned long i;
+
+ for (i = 0; i < count; i++)
+ if (st[i].enabled)
+ return;
+
+ for (i = 0; i < count; i++)
+ st[i].enabled = true;
+}
+
+static int run_selftests(struct selftest *st, unsigned long count)
+{
+ int err = 0;
+
+ set_default_test_all(st, count);
+
+ /* Tests are listed in natural order in selftests.h */
+ for (; count--; st++) {
+ if (!st->enabled)
+ continue;
+
+ pr_info("dma-buf: Running %s\n", st->name);
+ err = st->func();
+ if (err)
+ break;
+ }
+
+ if (WARN(err > 0 || err == -ENOTTY,
+ "%s returned %d, conflicting with selftest's magic values!\n",
+ st->name, err))
+ err = -1;
+
+ return err;
+}
+
+static int __init st_init(void)
+{
+ return run_selftests(selftests, ARRAY_SIZE(selftests));
+}
+
+static void __exit st_exit(void)
+{
+}
+
+module_param_named(st_filter, __st_filter, charp, 0400);
+module_init(st_init);
+module_exit(st_exit);
+
+MODULE_DESCRIPTION("Self-test harness for dma-buf");
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/dma-buf/selftest.h b/drivers/dma-buf/selftest.h
new file mode 100644
index 000000000..45793aff6
--- /dev/null
+++ b/drivers/dma-buf/selftest.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __SELFTEST_H__
+#define __SELFTEST_H__
+
+#include <linux/compiler.h>
+
+#define selftest(name, func) int func(void);
+#include "selftests.h"
+#undef selftest
+
+struct subtest {
+ int (*func)(void *data);
+ const char *name;
+};
+
+int __subtests(const char *caller,
+ const struct subtest *st,
+ int count,
+ void *data);
+#define subtests(T, data) \
+ __subtests(__func__, T, ARRAY_SIZE(T), data)
+
+#define SUBTEST(x) { x, #x }
+
+#endif /* __SELFTEST_H__ */
diff --git a/drivers/dma-buf/selftests.h b/drivers/dma-buf/selftests.h
new file mode 100644
index 000000000..851965867
--- /dev/null
+++ b/drivers/dma-buf/selftests.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: MIT */
+/* List each unit test as selftest(name, function)
+ *
+ * The name is used as both an enum and expanded as subtest__name to create
+ * a module parameter. It must be unique and legal for a C identifier.
+ *
+ * The function should be of type int function(void). It may be conditionally
+ * compiled using #if IS_ENABLED(CONFIG_DMABUF_SELFTESTS).
+ *
+ * Tests are executed in order by igt/dmabuf_selftest
+ */
+selftest(sanitycheck, __sanitycheck__) /* keep first (igt selfcheck) */
+selftest(dma_fence, dma_fence)
+selftest(dma_fence_chain, dma_fence_chain)
+selftest(dma_fence_unwrap, dma_fence_unwrap)
+selftest(dma_resv, dma_resv)
diff --git a/drivers/dma-buf/st-dma-fence-chain.c b/drivers/dma-buf/st-dma-fence-chain.c
new file mode 100644
index 000000000..0a9b099d0
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence-chain.c
@@ -0,0 +1,710 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/random.h>
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+static struct kmem_cache *slab_fences;
+
+static inline struct mock_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+} *to_mock_fence(struct dma_fence *f) {
+ return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+ kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+ .release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+ return &f->base;
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+ struct dma_fence *fence,
+ u64 seqno)
+{
+ struct dma_fence_chain *f;
+
+ f = dma_fence_chain_alloc();
+ if (!f)
+ return NULL;
+
+ dma_fence_chain_init(f, dma_fence_get(prev), dma_fence_get(fence),
+ seqno);
+
+ return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f, *chain;
+ int err = 0;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+	chain = mock_chain(NULL, f, 1);
+	if (!chain)
+		err = -ENOMEM;
+	else
+		dma_fence_enable_sw_signaling(chain);
+
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+ dma_fence_put(chain);
+
+ return err;
+}
+
+struct fence_chains {
+ unsigned int chain_length;
+ struct dma_fence **fences;
+ struct dma_fence **chains;
+
+ struct dma_fence *tail;
+};
+
+static uint64_t seqno_inc(unsigned int i)
+{
+ return i + 1;
+}
+
+static int fence_chains_init(struct fence_chains *fc, unsigned int count,
+ uint64_t (*seqno_fn)(unsigned int))
+{
+ unsigned int i;
+ int err = 0;
+
+ fc->chains = kvmalloc_array(count, sizeof(*fc->chains),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!fc->chains)
+ return -ENOMEM;
+
+ fc->fences = kvmalloc_array(count, sizeof(*fc->fences),
+ GFP_KERNEL | __GFP_ZERO);
+ if (!fc->fences) {
+ err = -ENOMEM;
+ goto err_chains;
+ }
+
+ fc->tail = NULL;
+ for (i = 0; i < count; i++) {
+ fc->fences[i] = mock_fence();
+ if (!fc->fences[i]) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+ fc->chains[i] = mock_chain(fc->tail,
+ fc->fences[i],
+ seqno_fn(i));
+ if (!fc->chains[i]) {
+ err = -ENOMEM;
+ goto unwind;
+ }
+
+ fc->tail = fc->chains[i];
+
+ dma_fence_enable_sw_signaling(fc->chains[i]);
+ }
+
+ fc->chain_length = i;
+ return 0;
+
+unwind:
+ for (i = 0; i < count; i++) {
+ dma_fence_put(fc->fences[i]);
+ dma_fence_put(fc->chains[i]);
+ }
+ kvfree(fc->fences);
+err_chains:
+ kvfree(fc->chains);
+ return err;
+}
+
+static void fence_chains_fini(struct fence_chains *fc)
+{
+ unsigned int i;
+
+ for (i = 0; i < fc->chain_length; i++) {
+ dma_fence_signal(fc->fences[i]);
+ dma_fence_put(fc->fences[i]);
+ }
+ kvfree(fc->fences);
+
+ for (i = 0; i < fc->chain_length; i++)
+ dma_fence_put(fc->chains[i]);
+ kvfree(fc->chains);
+}
+
+static int find_seqno(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc);
+ if (err)
+ return err;
+
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 0);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno(0)!\n", err);
+ goto err;
+ }
+
+ for (i = 0; i < fc.chain_length; i++) {
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, i + 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno(%d:%d)!\n",
+ err, fc.chain_length + 1, i + 1);
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+ fc.chain_length + 1, i + 1);
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, i + 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Error reported for finding self\n");
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence reported by find self\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, i + 2);
+ dma_fence_put(fence);
+ if (!err) {
+ pr_err("Error not reported for future fence: find_seqno(%d:%d)!\n",
+ i + 1, i + 2);
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, i);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Error reported for previous fence!\n");
+ goto err;
+ }
+ if (i > 0 && fence != fc.chains[i - 1]) {
+ pr_err("Incorrect fence reported by find_seqno(%d:%d)\n",
+ i + 1, i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int find_signaled(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+
+ err = fence_chains_init(&fc, 2, seqno_inc);
+ if (err)
+ return err;
+
+ dma_fence_signal(fc.fences[0]);
+
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno()!\n", err);
+ goto err;
+ }
+
+ if (fence && fence != fc.chains[0]) {
+ pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:1\n",
+ fence->seqno);
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, 1);
+ dma_fence_put(fence);
+ if (err)
+ pr_err("Reported %d for finding self!\n", err);
+
+ err = -EINVAL;
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int find_out_of_order(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+
+ err = fence_chains_init(&fc, 3, seqno_inc);
+ if (err)
+ return err;
+
+ dma_fence_signal(fc.fences[1]);
+
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 2);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno()!\n", err);
+ goto err;
+ }
+
+ /*
+ * We signaled the middle fence (2) of the 1-2-3 chain. The behavior
+ * of the dma-fence-chain is to make us wait for all the fences up to
+ * the point we want. Since fence 1 is still not signaled, this is what
+ * we should get as the fence to wait upon (fence 2 being garbage
+ * collected during the traversal of the chain).
+ */
+ if (fence != fc.chains[0]) {
+ pr_err("Incorrect chain-fence.seqno:%lld reported for completed seqno:2\n",
+ fence ? fence->seqno : 0);
+
+ err = -EINVAL;
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static uint64_t seqno_inc2(unsigned int i)
+{
+ return 2 * i + 2;
+}
+
+static int find_gap(void *arg)
+{
+ struct fence_chains fc;
+ struct dma_fence *fence;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc2);
+ if (err)
+ return err;
+
+ for (i = 0; i < fc.chain_length; i++) {
+ fence = dma_fence_get(fc.tail);
+ err = dma_fence_chain_find_seqno(&fence, 2 * i + 1);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Reported %d for find_seqno(%d:%d)!\n",
+ err, fc.chain_length + 1, 2 * i + 1);
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence.seqno:%lld reported by find_seqno(%d:%d)\n",
+ fence->seqno,
+ fc.chain_length + 1,
+ 2 * i + 1);
+ err = -EINVAL;
+ goto err;
+ }
+
+ dma_fence_get(fence);
+ err = dma_fence_chain_find_seqno(&fence, 2 * i + 2);
+ dma_fence_put(fence);
+ if (err) {
+ pr_err("Error reported for finding self\n");
+ goto err;
+ }
+ if (fence != fc.chains[i]) {
+ pr_err("Incorrect fence reported by find self\n");
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+struct find_race {
+ struct fence_chains fc;
+ atomic_t children;
+};
+
+static int __find_race(void *arg)
+{
+ struct find_race *data = arg;
+ int err = 0;
+
+ while (!kthread_should_stop()) {
+ struct dma_fence *fence = dma_fence_get(data->fc.tail);
+ int seqno;
+
+ seqno = prandom_u32_max(data->fc.chain_length) + 1;
+
+ err = dma_fence_chain_find_seqno(&fence, seqno);
+ if (err) {
+ pr_err("Failed to find fence seqno:%d\n",
+ seqno);
+ dma_fence_put(fence);
+ break;
+ }
+ if (!fence)
+ goto signal;
+
+ /*
+ * We can only find ourselves if we are on the fence we were
+ * looking for.
+ */
+ if (fence->seqno == seqno) {
+ err = dma_fence_chain_find_seqno(&fence, seqno);
+ if (err) {
+ pr_err("Reported an invalid fence for find-self:%d\n",
+ seqno);
+ dma_fence_put(fence);
+ break;
+ }
+ }
+
+ dma_fence_put(fence);
+
+signal:
+ seqno = prandom_u32_max(data->fc.chain_length - 1);
+ dma_fence_signal(data->fc.fences[seqno]);
+ cond_resched();
+ }
+
+ if (atomic_dec_and_test(&data->children))
+ wake_up_var(&data->children);
+ return err;
+}
+
+static int find_race(void *arg)
+{
+ struct find_race data;
+ int ncpus = num_online_cpus();
+ struct task_struct **threads;
+ unsigned long count;
+ int err;
+ int i;
+
+ err = fence_chains_init(&data.fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ threads = kmalloc_array(ncpus, sizeof(*threads), GFP_KERNEL);
+ if (!threads) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ atomic_set(&data.children, 0);
+ for (i = 0; i < ncpus; i++) {
+ threads[i] = kthread_run(__find_race, &data, "dmabuf/%d", i);
+ if (IS_ERR(threads[i])) {
+ ncpus = i;
+ break;
+ }
+ atomic_inc(&data.children);
+ get_task_struct(threads[i]);
+ }
+
+ wait_var_event_timeout(&data.children,
+ !atomic_read(&data.children),
+ 5 * HZ);
+
+ for (i = 0; i < ncpus; i++) {
+ int ret;
+
+ ret = kthread_stop(threads[i]);
+ if (ret && !err)
+ err = ret;
+ put_task_struct(threads[i]);
+ }
+ kfree(threads);
+
+ count = 0;
+ for (i = 0; i < data.fc.chain_length; i++)
+ if (dma_fence_is_signaled(data.fc.fences[i]))
+ count++;
+ pr_info("Completed %lu cycles\n", count);
+
+err:
+ fence_chains_fini(&data.fc);
+ return err;
+}
+
+static int signal_forward(void *arg)
+{
+ struct fence_chains fc;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc);
+ if (err)
+ return err;
+
+ for (i = 0; i < fc.chain_length; i++) {
+ dma_fence_signal(fc.fences[i]);
+
+ if (!dma_fence_is_signaled(fc.chains[i])) {
+ pr_err("chain[%d] not signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+
+ if (i + 1 < fc.chain_length &&
+ dma_fence_is_signaled(fc.chains[i + 1])) {
+ pr_err("chain[%d] is signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int signal_backward(void *arg)
+{
+ struct fence_chains fc;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, 64, seqno_inc);
+ if (err)
+ return err;
+
+ for (i = fc.chain_length; i--; ) {
+ dma_fence_signal(fc.fences[i]);
+
+ if (i > 0 && dma_fence_is_signaled(fc.chains[i])) {
+ pr_err("chain[%d] is signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+ for (i = 0; i < fc.chain_length; i++) {
+ if (!dma_fence_is_signaled(fc.chains[i])) {
+ pr_err("chain[%d] was not signaled!\n", i);
+ err = -EINVAL;
+ goto err;
+ }
+ }
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int __wait_fence_chains(void *arg)
+{
+ struct fence_chains *fc = arg;
+
+ if (dma_fence_wait(fc->tail, false))
+ return -EIO;
+
+ return 0;
+}
+
+static int wait_forward(void *arg)
+{
+ struct fence_chains fc;
+ struct task_struct *tsk;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto err;
+ }
+ get_task_struct(tsk);
+ yield_to(tsk, true);
+
+ for (i = 0; i < fc.chain_length; i++)
+ dma_fence_signal(fc.fences[i]);
+
+ err = kthread_stop(tsk);
+ put_task_struct(tsk);
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static int wait_backward(void *arg)
+{
+ struct fence_chains fc;
+ struct task_struct *tsk;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto err;
+ }
+ get_task_struct(tsk);
+ yield_to(tsk, true);
+
+ for (i = fc.chain_length; i--; )
+ dma_fence_signal(fc.fences[i]);
+
+ err = kthread_stop(tsk);
+ put_task_struct(tsk);
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+static void randomise_fences(struct fence_chains *fc)
+{
+ unsigned int count = fc->chain_length;
+
+ /* Fisher-Yates shuffle courtesy of Knuth */
+ while (--count) {
+ unsigned int swp;
+
+ swp = prandom_u32_max(count + 1);
+ if (swp == count)
+ continue;
+
+ swap(fc->fences[count], fc->fences[swp]);
+ }
+}
+
+static int wait_random(void *arg)
+{
+ struct fence_chains fc;
+ struct task_struct *tsk;
+ int err;
+ int i;
+
+ err = fence_chains_init(&fc, CHAIN_SZ, seqno_inc);
+ if (err)
+ return err;
+
+ randomise_fences(&fc);
+
+ tsk = kthread_run(__wait_fence_chains, &fc, "dmabuf/wait");
+ if (IS_ERR(tsk)) {
+ err = PTR_ERR(tsk);
+ goto err;
+ }
+ get_task_struct(tsk);
+ yield_to(tsk, true);
+
+ for (i = 0; i < fc.chain_length; i++)
+ dma_fence_signal(fc.fences[i]);
+
+ err = kthread_stop(tsk);
+ put_task_struct(tsk);
+
+err:
+ fence_chains_fini(&fc);
+ return err;
+}
+
+int dma_fence_chain(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(find_seqno),
+ SUBTEST(find_signaled),
+ SUBTEST(find_out_of_order),
+ SUBTEST(find_gap),
+ SUBTEST(find_race),
+ SUBTEST(signal_forward),
+ SUBTEST(signal_backward),
+ SUBTEST(wait_forward),
+ SUBTEST(wait_backward),
+ SUBTEST(wait_random),
+ };
+ int ret;
+
+ pr_info("sizeof(dma_fence_chain)=%zu\n",
+ sizeof(struct dma_fence_chain));
+
+ slab_fences = KMEM_CACHE(mock_fence,
+ SLAB_TYPESAFE_BY_RCU |
+ SLAB_HWCACHE_ALIGN);
+ if (!slab_fences)
+ return -ENOMEM;
+
+ ret = subtests(tests, NULL);
+
+ kmem_cache_destroy(slab_fences);
+ return ret;
+}
diff --git a/drivers/dma-buf/st-dma-fence-unwrap.c b/drivers/dma-buf/st-dma-fence-unwrap.c
new file mode 100644
index 000000000..f0cee984b
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence-unwrap.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: MIT
+
+/*
+ * Copyright (C) 2022 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/dma-fence.h>
+#include <linux/dma-fence-array.h>
+#include <linux/dma-fence-chain.h>
+#include <linux/dma-fence-unwrap.h>
+
+#include "selftest.h"
+
+#define CHAIN_SZ (4 << 10)
+
+struct mock_fence {
+ struct dma_fence base;
+ spinlock_t lock;
+};
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock,
+ dma_fence_context_alloc(1), 1);
+
+ return &f->base;
+}
+
+static struct dma_fence *mock_array(unsigned int num_fences, ...)
+{
+ struct dma_fence_array *array;
+ struct dma_fence **fences;
+ va_list valist;
+ int i;
+
+ fences = kcalloc(num_fences, sizeof(*fences), GFP_KERNEL);
+ if (!fences)
+ goto error_put;
+
+ va_start(valist, num_fences);
+ for (i = 0; i < num_fences; ++i)
+ fences[i] = va_arg(valist, typeof(*fences));
+ va_end(valist);
+
+ array = dma_fence_array_create(num_fences, fences,
+ dma_fence_context_alloc(1),
+ 1, false);
+ if (!array)
+ goto error_free;
+ return &array->base;
+
+error_free:
+ kfree(fences);
+
+error_put:
+ va_start(valist, num_fences);
+ for (i = 0; i < num_fences; ++i)
+ dma_fence_put(va_arg(valist, typeof(*fences)));
+ va_end(valist);
+ return NULL;
+}
+
+static struct dma_fence *mock_chain(struct dma_fence *prev,
+ struct dma_fence *fence)
+{
+ struct dma_fence_chain *f;
+
+ f = dma_fence_chain_alloc();
+ if (!f) {
+ dma_fence_put(prev);
+ dma_fence_put(fence);
+ return NULL;
+ }
+
+ dma_fence_chain_init(f, prev, fence, 1);
+ return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f, *chain, *array;
+ int err = 0;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ array = mock_array(1, f);
+ if (!array)
+ return -ENOMEM;
+
+ chain = mock_chain(NULL, array);
+ if (!chain)
+ return -ENOMEM;
+
+ dma_fence_put(chain);
+ return err;
+}
+
+static int unwrap_array(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *array;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ array = mock_array(2, f1, f2);
+ if (!array)
+ return -ENOMEM;
+
+ dma_fence_unwrap_for_each(fence, &iter, array) {
+ if (fence == f1) {
+ f1 = NULL;
+ } else if (fence == f2) {
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(array);
+ return err;
+}
+
+static int unwrap_chain(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *chain;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ chain = mock_chain(f1, f2);
+ if (!chain)
+ return -ENOMEM;
+
+ dma_fence_unwrap_for_each(fence, &iter, chain) {
+ if (fence == f1) {
+ f1 = NULL;
+ } else if (fence == f2) {
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(chain);
+ return err;
+}
+
+static int unwrap_chain_array(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *array, *chain;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = mock_fence();
+ if (!f2) {
+ dma_fence_put(f1);
+ return -ENOMEM;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ array = mock_array(2, f1, f2);
+ if (!array)
+ return -ENOMEM;
+
+ chain = mock_chain(NULL, array);
+ if (!chain)
+ return -ENOMEM;
+
+ dma_fence_unwrap_for_each(fence, &iter, chain) {
+ if (fence == f1) {
+ f1 = NULL;
+ } else if (fence == f2) {
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(chain);
+ return err;
+}
+
+static int unwrap_merge(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3;
+ struct dma_fence_unwrap iter;
+ int err = 0;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = mock_fence();
+ if (!f2) {
+ err = -ENOMEM;
+ goto error_put_f1;
+ }
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = dma_fence_unwrap_merge(f1, f2);
+ if (!f3) {
+ err = -ENOMEM;
+ goto error_put_f2;
+ }
+
+ dma_fence_unwrap_for_each(fence, &iter, f3) {
+ if (fence == f1) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else if (fence == f2) {
+ dma_fence_put(f2);
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+static int unwrap_merge_complex(void *arg)
+{
+ struct dma_fence *fence, *f1, *f2, *f3, *f4, *f5;
+ struct dma_fence_unwrap iter;
+ int err = -ENOMEM;
+
+ f1 = mock_fence();
+ if (!f1)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f1);
+
+ f2 = mock_fence();
+ if (!f2)
+ goto error_put_f1;
+
+ dma_fence_enable_sw_signaling(f2);
+
+ f3 = dma_fence_unwrap_merge(f1, f2);
+ if (!f3)
+ goto error_put_f2;
+
+ /* The resulting array has the fences in reverse */
+ f4 = dma_fence_unwrap_merge(f2, f1);
+ if (!f4)
+ goto error_put_f3;
+
+ /* Signaled fences should be filtered, the two arrays merged. */
+ f5 = dma_fence_unwrap_merge(f3, f4, dma_fence_get_stub());
+ if (!f5)
+ goto error_put_f4;
+
+ err = 0;
+ dma_fence_unwrap_for_each(fence, &iter, f5) {
+ if (fence == f1) {
+ dma_fence_put(f1);
+ f1 = NULL;
+ } else if (fence == f2) {
+ dma_fence_put(f2);
+ f2 = NULL;
+ } else {
+ pr_err("Unexpected fence!\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (f1 || f2) {
+ pr_err("Not all fences seen!\n");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f5);
+error_put_f4:
+ dma_fence_put(f4);
+error_put_f3:
+ dma_fence_put(f3);
+error_put_f2:
+ dma_fence_put(f2);
+error_put_f1:
+ dma_fence_put(f1);
+ return err;
+}
+
+int dma_fence_unwrap(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(unwrap_array),
+ SUBTEST(unwrap_chain),
+ SUBTEST(unwrap_chain_array),
+ SUBTEST(unwrap_merge),
+ SUBTEST(unwrap_merge_complex),
+ };
+
+ return subtests(tests, NULL);
+}
diff --git a/drivers/dma-buf/st-dma-fence.c b/drivers/dma-buf/st-dma-fence.c
new file mode 100644
index 000000000..fb6e0a6ae
--- /dev/null
+++ b/drivers/dma-buf/st-dma-fence.c
@@ -0,0 +1,593 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/delay.h>
+#include <linux/dma-fence.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "selftest.h"
+
+static struct kmem_cache *slab_fences;
+
+static struct mock_fence {
+ struct dma_fence base;
+ struct spinlock lock;
+} *to_mock_fence(struct dma_fence *f) {
+ return container_of(f, struct mock_fence, base);
+}
+
+static const char *mock_name(struct dma_fence *f)
+{
+ return "mock";
+}
+
+static void mock_fence_release(struct dma_fence *f)
+{
+ kmem_cache_free(slab_fences, to_mock_fence(f));
+}
+
+struct wait_cb {
+ struct dma_fence_cb cb;
+ struct task_struct *task;
+};
+
+static void mock_wakeup(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ wake_up_process(container_of(cb, struct wait_cb, cb)->task);
+}
+
+static long mock_wait(struct dma_fence *f, bool intr, long timeout)
+{
+ const int state = intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
+ struct wait_cb cb = { .task = current };
+
+ if (dma_fence_add_callback(f, &cb.cb, mock_wakeup))
+ return timeout;
+
+ while (timeout) {
+ set_current_state(state);
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
+ break;
+
+ if (signal_pending_state(state, current))
+ break;
+
+ timeout = schedule_timeout(timeout);
+ }
+ __set_current_state(TASK_RUNNING);
+
+ if (!dma_fence_remove_callback(f, &cb.cb))
+ return timeout;
+
+ if (signal_pending_state(state, current))
+ return -ERESTARTSYS;
+
+ return -ETIME;
+}
+
+static const struct dma_fence_ops mock_ops = {
+ .get_driver_name = mock_name,
+ .get_timeline_name = mock_name,
+ .wait = mock_wait,
+ .release = mock_fence_release,
+};
+
+static struct dma_fence *mock_fence(void)
+{
+ struct mock_fence *f;
+
+ f = kmem_cache_alloc(slab_fences, GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ spin_lock_init(&f->lock);
+ dma_fence_init(&f->base, &mock_ops, &f->lock, 0, 0);
+
+ return &f->base;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_fence *f;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+ return 0;
+}
+
+static int test_signaling(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ if (dma_fence_is_signaled(f)) {
+ pr_err("Fence unexpectedly signaled on creation\n");
+ goto err_free;
+ }
+
+ if (dma_fence_signal(f)) {
+ pr_err("Fence reported being already signaled\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_is_signaled(f)) {
+ pr_err("Fence not reporting signaled\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_signal(f)) {
+ pr_err("Fence reported not being already signaled\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+struct simple_cb {
+ struct dma_fence_cb cb;
+ bool seen;
+};
+
+static void simple_callback(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ smp_store_mb(container_of(cb, struct simple_cb, cb)->seen, true);
+}
+
+static int test_add_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_late_add_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ dma_fence_signal(f);
+
+ if (!dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Added callback, but fence was already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (cb.seen) {
+ pr_err("Callback called after failed attachment !\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_rm_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ if (!dma_fence_remove_callback(f, &cb.cb)) {
+ pr_err("Failed to remove callback!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (cb.seen) {
+ pr_err("Callback still signaled after removal!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_late_rm_callback(void *arg)
+{
+ struct simple_cb cb = {};
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ if (dma_fence_add_callback(f, &cb.cb, simple_callback)) {
+ pr_err("Failed to add callback, fence already signaled!\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (!cb.seen) {
+ pr_err("Callback failed!\n");
+ goto err_free;
+ }
+
+ if (dma_fence_remove_callback(f, &cb.cb)) {
+ pr_err("Callback removal succeed after being executed!\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_status(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ if (dma_fence_get_status(f)) {
+ pr_err("Fence unexpectedly has signaled status on creation\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (!dma_fence_get_status(f)) {
+ pr_err("Fence not reporting signaled status\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_error(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ dma_fence_set_error(f, -EIO);
+
+ if (dma_fence_get_status(f)) {
+ pr_err("Fence unexpectedly has error status before signal\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+ if (dma_fence_get_status(f) != -EIO) {
+ pr_err("Fence not reporting error status, got %d\n",
+ dma_fence_get_status(f));
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_put(f);
+ return err;
+}
+
+static int test_wait(void *arg)
+{
+ struct dma_fence *f;
+ int err = -EINVAL;
+
+ f = mock_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ if (dma_fence_wait_timeout(f, false, 0) != -ETIME) {
+ pr_err("Wait reported complete before being signaled\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+
+ if (dma_fence_wait_timeout(f, false, 0) != 0) {
+ pr_err("Wait reported incomplete after being signaled\n");
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ dma_fence_signal(f);
+ dma_fence_put(f);
+ return err;
+}
+
+struct wait_timer {
+ struct timer_list timer;
+ struct dma_fence *f;
+};
+
+static void wait_timer(struct timer_list *timer)
+{
+ struct wait_timer *wt = from_timer(wt, timer, timer);
+
+ dma_fence_signal(wt->f);
+}
+
+static int test_wait_timeout(void *arg)
+{
+ struct wait_timer wt;
+ int err = -EINVAL;
+
+ timer_setup_on_stack(&wt.timer, wait_timer, 0);
+
+ wt.f = mock_fence();
+ if (!wt.f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(wt.f);
+
+ if (dma_fence_wait_timeout(wt.f, false, 1) != -ETIME) {
+ pr_err("Wait reported complete before being signaled\n");
+ goto err_free;
+ }
+
+ mod_timer(&wt.timer, jiffies + 1);
+
+ if (dma_fence_wait_timeout(wt.f, false, 2) == -ETIME) {
+ if (timer_pending(&wt.timer)) {
+ pr_notice("Timer did not fire within the jiffie!\n");
+ err = 0; /* not our fault! */
+ } else {
+ pr_err("Wait reported incomplete after timeout\n");
+ }
+ goto err_free;
+ }
+
+ err = 0;
+err_free:
+ del_timer_sync(&wt.timer);
+ destroy_timer_on_stack(&wt.timer);
+ dma_fence_signal(wt.f);
+ dma_fence_put(wt.f);
+ return err;
+}
+
+static int test_stub(void *arg)
+{
+ struct dma_fence *f[64];
+ int err = -EINVAL;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(f); i++) {
+ f[i] = dma_fence_get_stub();
+ if (!dma_fence_is_signaled(f[i])) {
+ pr_err("Obtained unsignaled stub fence!\n");
+ goto err;
+ }
+ }
+
+ err = 0;
+err:
+ while (i--)
+ dma_fence_put(f[i]);
+ return err;
+}
+
+/* Now off to the races! */
+
+struct race_thread {
+ struct dma_fence __rcu **fences;
+ struct task_struct *task;
+ bool before;
+ int id;
+};
+
+static void __wait_for_callbacks(struct dma_fence *f)
+{
+ spin_lock_irq(f->lock);
+ spin_unlock_irq(f->lock);
+}
+
+static int thread_signal_callback(void *arg)
+{
+ const struct race_thread *t = arg;
+ unsigned long pass = 0;
+ unsigned long miss = 0;
+ int err = 0;
+
+ while (!err && !kthread_should_stop()) {
+ struct dma_fence *f1, *f2;
+ struct simple_cb cb;
+
+ f1 = mock_fence();
+ if (!f1) {
+ err = -ENOMEM;
+ break;
+ }
+
+ dma_fence_enable_sw_signaling(f1);
+
+ rcu_assign_pointer(t->fences[t->id], f1);
+ smp_wmb();
+
+ rcu_read_lock();
+ do {
+ f2 = dma_fence_get_rcu_safe(&t->fences[!t->id]);
+ } while (!f2 && !kthread_should_stop());
+ rcu_read_unlock();
+
+ if (t->before)
+ dma_fence_signal(f1);
+
+ smp_store_mb(cb.seen, false);
+ if (!f2 ||
+ dma_fence_add_callback(f2, &cb.cb, simple_callback)) {
+ miss++;
+ cb.seen = true;
+ }
+
+ if (!t->before)
+ dma_fence_signal(f1);
+
+ if (!cb.seen) {
+ dma_fence_wait(f2, false);
+ __wait_for_callbacks(f2);
+ }
+
+ if (!READ_ONCE(cb.seen)) {
+ pr_err("Callback not seen on thread %d, pass %lu (%lu misses), signaling %s add_callback; fence signaled? %s\n",
+ t->id, pass, miss,
+ t->before ? "before" : "after",
+ dma_fence_is_signaled(f2) ? "yes" : "no");
+ err = -EINVAL;
+ }
+
+ dma_fence_put(f2);
+
+ rcu_assign_pointer(t->fences[t->id], NULL);
+ smp_wmb();
+
+ dma_fence_put(f1);
+
+ pass++;
+ }
+
+ pr_info("%s[%d] completed %lu passes, %lu misses\n",
+ __func__, t->id, pass, miss);
+ return err;
+}
+
+static int race_signal_callback(void *arg)
+{
+ struct dma_fence __rcu *f[2] = {};
+ int ret = 0;
+ int pass;
+
+ for (pass = 0; !ret && pass <= 1; pass++) {
+ struct race_thread t[2];
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
+ t[i].fences = f;
+ t[i].id = i;
+ t[i].before = pass;
+ t[i].task = kthread_run(thread_signal_callback, &t[i],
+ "dma-fence:%d", i);
+ get_task_struct(t[i].task);
+ }
+
+ msleep(50);
+
+ for (i = 0; i < ARRAY_SIZE(t); i++) {
+ int err;
+
+ err = kthread_stop(t[i].task);
+ if (err && !ret)
+ ret = err;
+
+ put_task_struct(t[i].task);
+ }
+ }
+
+ return ret;
+}
+
+int dma_fence(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(test_signaling),
+ SUBTEST(test_add_callback),
+ SUBTEST(test_late_add_callback),
+ SUBTEST(test_rm_callback),
+ SUBTEST(test_late_rm_callback),
+ SUBTEST(test_status),
+ SUBTEST(test_error),
+ SUBTEST(test_wait),
+ SUBTEST(test_wait_timeout),
+ SUBTEST(test_stub),
+ SUBTEST(race_signal_callback),
+ };
+ int ret;
+
+ pr_info("sizeof(dma_fence)=%zu\n", sizeof(struct dma_fence));
+
+ slab_fences = KMEM_CACHE(mock_fence,
+ SLAB_TYPESAFE_BY_RCU |
+ SLAB_HWCACHE_ALIGN);
+ if (!slab_fences)
+ return -ENOMEM;
+
+ ret = subtests(tests, NULL);
+
+ kmem_cache_destroy(slab_fences);
+
+ return ret;
+}
diff --git a/drivers/dma-buf/st-dma-resv.c b/drivers/dma-buf/st-dma-resv.c
new file mode 100644
index 000000000..15dbea146
--- /dev/null
+++ b/drivers/dma-buf/st-dma-resv.c
@@ -0,0 +1,316 @@
+/* SPDX-License-Identifier: MIT */
+
+/*
+ * Copyright © 2019 Intel Corporation
+ * Copyright © 2021 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/dma-resv.h>
+
+#include "selftest.h"
+
+static struct spinlock fence_lock;
+
+static const char *fence_name(struct dma_fence *f)
+{
+ return "selftest";
+}
+
+static const struct dma_fence_ops fence_ops = {
+ .get_driver_name = fence_name,
+ .get_timeline_name = fence_name,
+};
+
+static struct dma_fence *alloc_fence(void)
+{
+ struct dma_fence *f;
+
+ f = kmalloc(sizeof(*f), GFP_KERNEL);
+ if (!f)
+ return NULL;
+
+ dma_fence_init(f, &fence_ops, &fence_lock, 0, 0);
+ return f;
+}
+
+static int sanitycheck(void *arg)
+{
+ struct dma_resv resv;
+ struct dma_fence *f;
+ int r;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ dma_fence_signal(f);
+ dma_fence_put(f);
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r)
+ pr_err("Resv locking failed\n");
+ else
+ dma_resv_unlock(&resv);
+ dma_resv_fini(&resv);
+ return r;
+}
+
+static int test_signaling(void *arg)
+{
+ enum dma_resv_usage usage = (unsigned long)arg;
+ struct dma_resv resv;
+ struct dma_fence *f;
+ int r;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r) {
+ pr_err("Resv locking failed\n");
+ goto err_free;
+ }
+
+ r = dma_resv_reserve_fences(&resv, 1);
+ if (r) {
+ pr_err("Resv shared slot allocation failed\n");
+ goto err_unlock;
+ }
+
+ dma_resv_add_fence(&resv, f, usage);
+ if (dma_resv_test_signaled(&resv, usage)) {
+ pr_err("Resv unexpectedly signaled\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+ dma_fence_signal(f);
+ if (!dma_resv_test_signaled(&resv, usage)) {
+ pr_err("Resv not reporting signaled\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+err_unlock:
+ dma_resv_unlock(&resv);
+err_free:
+ dma_resv_fini(&resv);
+ dma_fence_put(f);
+ return r;
+}
+
+static int test_for_each(void *arg)
+{
+ enum dma_resv_usage usage = (unsigned long)arg;
+ struct dma_resv_iter cursor;
+ struct dma_fence *f, *fence;
+ struct dma_resv resv;
+ int r;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r) {
+ pr_err("Resv locking failed\n");
+ goto err_free;
+ }
+
+ r = dma_resv_reserve_fences(&resv, 1);
+ if (r) {
+ pr_err("Resv shared slot allocation failed\n");
+ goto err_unlock;
+ }
+
+ dma_resv_add_fence(&resv, f, usage);
+
+ r = -ENOENT;
+ dma_resv_for_each_fence(&cursor, &resv, usage, fence) {
+ if (!r) {
+ pr_err("More than one fence found\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+ if (f != fence) {
+ pr_err("Unexpected fence\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+ if (dma_resv_iter_usage(&cursor) != usage) {
+ pr_err("Unexpected fence usage\n");
+ r = -EINVAL;
+ goto err_unlock;
+ }
+ r = 0;
+ }
+ if (r) {
+ pr_err("No fence found\n");
+ goto err_unlock;
+ }
+ dma_fence_signal(f);
+err_unlock:
+ dma_resv_unlock(&resv);
+err_free:
+ dma_resv_fini(&resv);
+ dma_fence_put(f);
+ return r;
+}
+
+static int test_for_each_unlocked(void *arg)
+{
+ enum dma_resv_usage usage = (unsigned long)arg;
+ struct dma_resv_iter cursor;
+ struct dma_fence *f, *fence;
+ struct dma_resv resv;
+ int r;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r) {
+ pr_err("Resv locking failed\n");
+ goto err_free;
+ }
+
+ r = dma_resv_reserve_fences(&resv, 1);
+ if (r) {
+ pr_err("Resv shared slot allocation failed\n");
+ dma_resv_unlock(&resv);
+ goto err_free;
+ }
+
+ dma_resv_add_fence(&resv, f, usage);
+ dma_resv_unlock(&resv);
+
+ r = -ENOENT;
+ dma_resv_iter_begin(&cursor, &resv, usage);
+ dma_resv_for_each_fence_unlocked(&cursor, fence) {
+ if (!r) {
+ pr_err("More than one fence found\n");
+ r = -EINVAL;
+ goto err_iter_end;
+ }
+ if (!dma_resv_iter_is_restarted(&cursor)) {
+ pr_err("No restart flag\n");
+ goto err_iter_end;
+ }
+ if (f != fence) {
+ pr_err("Unexpected fence\n");
+ r = -EINVAL;
+ goto err_iter_end;
+ }
+ if (dma_resv_iter_usage(&cursor) != usage) {
+ pr_err("Unexpected fence usage\n");
+ r = -EINVAL;
+ goto err_iter_end;
+ }
+
+ /* We use r as state here */
+ if (r == -ENOENT) {
+ r = -EINVAL;
+ /* That should trigger a restart */
+ cursor.fences = (void*)~0;
+ } else if (r == -EINVAL) {
+ r = 0;
+ }
+ }
+ if (r)
+ pr_err("No fence found\n");
+err_iter_end:
+ dma_resv_iter_end(&cursor);
+ dma_fence_signal(f);
+err_free:
+ dma_resv_fini(&resv);
+ dma_fence_put(f);
+ return r;
+}
+
+static int test_get_fences(void *arg)
+{
+ enum dma_resv_usage usage = (unsigned long)arg;
+ struct dma_fence *f, **fences = NULL;
+ struct dma_resv resv;
+ int r, i;
+
+ f = alloc_fence();
+ if (!f)
+ return -ENOMEM;
+
+ dma_fence_enable_sw_signaling(f);
+
+ dma_resv_init(&resv);
+ r = dma_resv_lock(&resv, NULL);
+ if (r) {
+ pr_err("Resv locking failed\n");
+ goto err_resv;
+ }
+
+ r = dma_resv_reserve_fences(&resv, 1);
+ if (r) {
+ pr_err("Resv shared slot allocation failed\n");
+ dma_resv_unlock(&resv);
+ goto err_resv;
+ }
+
+ dma_resv_add_fence(&resv, f, usage);
+ dma_resv_unlock(&resv);
+
+ r = dma_resv_get_fences(&resv, usage, &i, &fences);
+ if (r) {
+ pr_err("get_fences failed\n");
+ goto err_free;
+ }
+
+ if (i != 1 || fences[0] != f) {
+ pr_err("get_fences returned unexpected fence\n");
+ goto err_free;
+ }
+
+ dma_fence_signal(f);
+err_free:
+ while (i--)
+ dma_fence_put(fences[i]);
+ kfree(fences);
+err_resv:
+ dma_resv_fini(&resv);
+ dma_fence_put(f);
+ return r;
+}
+
+int dma_resv(void)
+{
+ static const struct subtest tests[] = {
+ SUBTEST(sanitycheck),
+ SUBTEST(test_signaling),
+ SUBTEST(test_for_each),
+ SUBTEST(test_for_each_unlocked),
+ SUBTEST(test_get_fences),
+ };
+ enum dma_resv_usage usage;
+ int r;
+
+ spin_lock_init(&fence_lock);
+ for (usage = DMA_RESV_USAGE_KERNEL; usage <= DMA_RESV_USAGE_BOOKKEEP;
+ ++usage) {
+ r = subtests(tests, (void *)(unsigned long)usage);
+ if (r)
+ return r;
+ }
+ return 0;
+}
diff --git a/drivers/dma-buf/sw_sync.c b/drivers/dma-buf/sw_sync.c
new file mode 100644
index 000000000..7f5ed1aa7
--- /dev/null
+++ b/drivers/dma-buf/sw_sync.c
@@ -0,0 +1,412 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sync File validation framework
+ *
+ * Copyright (C) 2012 Google, Inc.
+ */
+
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/sync_file.h>
+
+#include "sync_debug.h"
+
+#define CREATE_TRACE_POINTS
+#include "sync_trace.h"
+
+/*
+ * SW SYNC validation framework
+ *
+ * A sync object driver that uses a 32bit counter to coordinate
+ * synchronization. Useful when there is no hardware primitive backing
+ * the synchronization.
+ *
+ * To start the framework just open:
+ *
+ * <debugfs>/sync/sw_sync
+ *
+ * That will create a sync timeline; all fences created under this timeline
+ * file descriptor will belong to this timeline.
+ *
+ * The 'sw_sync' file can be opened many times to create different
+ * timelines.
+ *
+ * Fences can be created with SW_SYNC_IOC_CREATE_FENCE ioctl with struct
+ * sw_sync_create_fence_data as parameter.
+ *
+ * To increment the timeline counter, the SW_SYNC_IOC_INC ioctl should be
+ * used with the increment as a u32. This will update the last signaled
+ * value of the timeline and signal any fence that has a seqno smaller than
+ * or equal to it.
+ *
+ * struct sw_sync_create_fence_data
+ * @value: the seqno to initialise the fence with
+ * @name: the name of the new sync point
+ * @fence: return the fd of the new sync_file with the created fence
+ */
+struct sw_sync_create_fence_data {
+ __u32 value;
+ char name[32];
+ __s32 fence; /* fd of new fence */
+};
+
+#define SW_SYNC_IOC_MAGIC 'W'
+
+#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
+ struct sw_sync_create_fence_data)
+
+#define SW_SYNC_IOC_INC _IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
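+
+/*
+ * Illustrative userspace sketch (not part of this driver): assuming debugfs
+ * is mounted at /sys/kernel/debug and this framework is enabled, a test
+ * program could create a fence at seqno 5 and later signal it by advancing
+ * the timeline counter. Error handling is omitted for brevity.
+ *
+ *	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
+ *	struct sw_sync_create_fence_data data = { .value = 5 };
+ *	__u32 inc = 5;
+ *
+ *	strcpy(data.name, "test-pt");
+ *	ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);
+ *	// data.fence now holds a sync_file fd for seqno 5
+ *
+ *	ioctl(tl, SW_SYNC_IOC_INC, &inc);
+ *	// the timeline value is now 5, so the fence above signals
+ *
+ *	close(data.fence);
+ *	close(tl);
+ */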
+
+static const struct dma_fence_ops timeline_fence_ops;
+
+static inline struct sync_pt *dma_fence_to_sync_pt(struct dma_fence *fence)
+{
+ if (fence->ops != &timeline_fence_ops)
+ return NULL;
+ return container_of(fence, struct sync_pt, base);
+}
+
+/**
+ * sync_timeline_create() - creates a sync object
+ * @name: sync_timeline name
+ *
+ * Creates a new sync_timeline. Returns the sync_timeline object or NULL in
+ * case of error.
+ */
+static struct sync_timeline *sync_timeline_create(const char *name)
+{
+ struct sync_timeline *obj;
+
+ obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+ if (!obj)
+ return NULL;
+
+ kref_init(&obj->kref);
+ obj->context = dma_fence_context_alloc(1);
+ strlcpy(obj->name, name, sizeof(obj->name));
+
+ obj->pt_tree = RB_ROOT;
+ INIT_LIST_HEAD(&obj->pt_list);
+ spin_lock_init(&obj->lock);
+
+ sync_timeline_debug_add(obj);
+
+ return obj;
+}
+
+static void sync_timeline_free(struct kref *kref)
+{
+ struct sync_timeline *obj =
+ container_of(kref, struct sync_timeline, kref);
+
+ sync_timeline_debug_remove(obj);
+
+ kfree(obj);
+}
+
+static void sync_timeline_get(struct sync_timeline *obj)
+{
+ kref_get(&obj->kref);
+}
+
+static void sync_timeline_put(struct sync_timeline *obj)
+{
+ kref_put(&obj->kref, sync_timeline_free);
+}
+
+static const char *timeline_fence_get_driver_name(struct dma_fence *fence)
+{
+ return "sw_sync";
+}
+
+static const char *timeline_fence_get_timeline_name(struct dma_fence *fence)
+{
+ struct sync_timeline *parent = dma_fence_parent(fence);
+
+ return parent->name;
+}
+
+static void timeline_fence_release(struct dma_fence *fence)
+{
+ struct sync_pt *pt = dma_fence_to_sync_pt(fence);
+ struct sync_timeline *parent = dma_fence_parent(fence);
+ unsigned long flags;
+
+ spin_lock_irqsave(fence->lock, flags);
+ if (!list_empty(&pt->link)) {
+ list_del(&pt->link);
+ rb_erase(&pt->node, &parent->pt_tree);
+ }
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ sync_timeline_put(parent);
+ dma_fence_free(fence);
+}
+
+static bool timeline_fence_signaled(struct dma_fence *fence)
+{
+ struct sync_timeline *parent = dma_fence_parent(fence);
+
+ return !__dma_fence_is_later(fence->seqno, parent->value, fence->ops);
+}
+
+static bool timeline_fence_enable_signaling(struct dma_fence *fence)
+{
+ return true;
+}
+
+static void timeline_fence_value_str(struct dma_fence *fence,
+ char *str, int size)
+{
+ snprintf(str, size, "%lld", fence->seqno);
+}
+
+static void timeline_fence_timeline_value_str(struct dma_fence *fence,
+ char *str, int size)
+{
+ struct sync_timeline *parent = dma_fence_parent(fence);
+
+ snprintf(str, size, "%d", parent->value);
+}
+
+static const struct dma_fence_ops timeline_fence_ops = {
+ .get_driver_name = timeline_fence_get_driver_name,
+ .get_timeline_name = timeline_fence_get_timeline_name,
+ .enable_signaling = timeline_fence_enable_signaling,
+ .signaled = timeline_fence_signaled,
+ .release = timeline_fence_release,
+ .fence_value_str = timeline_fence_value_str,
+ .timeline_value_str = timeline_fence_timeline_value_str,
+};
+
+/**
+ * sync_timeline_signal() - signal a status change on a sync_timeline
+ * @obj: sync_timeline to signal
+ * @inc: num to increment on timeline->value
+ *
+ * A sync implementation should call this any time one of its fences
+ * has signaled or has an error condition.
+ */
+static void sync_timeline_signal(struct sync_timeline *obj, unsigned int inc)
+{
+ LIST_HEAD(signalled);
+ struct sync_pt *pt, *next;
+
+ trace_sync_timeline(obj);
+
+ spin_lock_irq(&obj->lock);
+
+ obj->value += inc;
+
+ list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+ if (!timeline_fence_signaled(&pt->base))
+ break;
+
+ dma_fence_get(&pt->base);
+
+ list_move_tail(&pt->link, &signalled);
+ rb_erase(&pt->node, &obj->pt_tree);
+
+ dma_fence_signal_locked(&pt->base);
+ }
+
+ spin_unlock_irq(&obj->lock);
+
+ list_for_each_entry_safe(pt, next, &signalled, link) {
+ list_del_init(&pt->link);
+ dma_fence_put(&pt->base);
+ }
+}
+
+/**
+ * sync_pt_create() - creates a sync pt
+ * @obj: parent sync_timeline
+ * @value: value of the fence
+ *
+ * Creates a new sync_pt (fence) as a child of @obj, initialised to @value
+ * on the timeline's fence context. Returns the sync_pt object or
+ * NULL in case of error.
+ */
+static struct sync_pt *sync_pt_create(struct sync_timeline *obj,
+ unsigned int value)
+{
+ struct sync_pt *pt;
+
+ pt = kzalloc(sizeof(*pt), GFP_KERNEL);
+ if (!pt)
+ return NULL;
+
+ sync_timeline_get(obj);
+ dma_fence_init(&pt->base, &timeline_fence_ops, &obj->lock,
+ obj->context, value);
+ INIT_LIST_HEAD(&pt->link);
+
+ spin_lock_irq(&obj->lock);
+ if (!dma_fence_is_signaled_locked(&pt->base)) {
+ struct rb_node **p = &obj->pt_tree.rb_node;
+ struct rb_node *parent = NULL;
+
+ while (*p) {
+ struct sync_pt *other;
+ int cmp;
+
+ parent = *p;
+ other = rb_entry(parent, typeof(*pt), node);
+ cmp = value - other->base.seqno;
+ if (cmp > 0) {
+ p = &parent->rb_right;
+ } else if (cmp < 0) {
+ p = &parent->rb_left;
+ } else {
+ if (dma_fence_get_rcu(&other->base)) {
+ sync_timeline_put(obj);
+ kfree(pt);
+ pt = other;
+ goto unlock;
+ }
+ p = &parent->rb_left;
+ }
+ }
+ rb_link_node(&pt->node, parent, p);
+ rb_insert_color(&pt->node, &obj->pt_tree);
+
+ parent = rb_next(&pt->node);
+ list_add_tail(&pt->link,
+ parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list);
+ }
+unlock:
+ spin_unlock_irq(&obj->lock);
+
+ return pt;
+}
+
+/*
+ * *WARNING*
+ *
+ * Improper use of this can result in deadlocking kernel drivers from userspace.
+ */
+
+/* opening sw_sync creates a new sync obj */
+static int sw_sync_debugfs_open(struct inode *inode, struct file *file)
+{
+ struct sync_timeline *obj;
+ char task_comm[TASK_COMM_LEN];
+
+ get_task_comm(task_comm, current);
+
+ obj = sync_timeline_create(task_comm);
+ if (!obj)
+ return -ENOMEM;
+
+ file->private_data = obj;
+
+ return 0;
+}
+
+static int sw_sync_debugfs_release(struct inode *inode, struct file *file)
+{
+ struct sync_timeline *obj = file->private_data;
+ struct sync_pt *pt, *next;
+
+ spin_lock_irq(&obj->lock);
+
+ list_for_each_entry_safe(pt, next, &obj->pt_list, link) {
+ dma_fence_set_error(&pt->base, -ENOENT);
+ dma_fence_signal_locked(&pt->base);
+ }
+
+ spin_unlock_irq(&obj->lock);
+
+ sync_timeline_put(obj);
+ return 0;
+}
+
+static long sw_sync_ioctl_create_fence(struct sync_timeline *obj,
+ unsigned long arg)
+{
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ int err;
+ struct sync_pt *pt;
+ struct sync_file *sync_file;
+ struct sw_sync_create_fence_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err;
+ }
+
+ pt = sync_pt_create(obj, data.value);
+ if (!pt) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ sync_file = sync_file_create(&pt->base);
+ dma_fence_put(&pt->base);
+ if (!sync_file) {
+ err = -ENOMEM;
+ goto err;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ fput(sync_file->file);
+ err = -EFAULT;
+ goto err;
+ }
+
+ fd_install(fd, sync_file->file);
+
+ return 0;
+
+err:
+ put_unused_fd(fd);
+ return err;
+}
+
+static long sw_sync_ioctl_inc(struct sync_timeline *obj, unsigned long arg)
+{
+ u32 value;
+
+ if (copy_from_user(&value, (void __user *)arg, sizeof(value)))
+ return -EFAULT;
+
+ while (value > INT_MAX) {
+ sync_timeline_signal(obj, INT_MAX);
+ value -= INT_MAX;
+ }
+
+ sync_timeline_signal(obj, value);
+
+ return 0;
+}
+
+static long sw_sync_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sync_timeline *obj = file->private_data;
+
+ switch (cmd) {
+ case SW_SYNC_IOC_CREATE_FENCE:
+ return sw_sync_ioctl_create_fence(obj, arg);
+
+ case SW_SYNC_IOC_INC:
+ return sw_sync_ioctl_inc(obj, arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+const struct file_operations sw_sync_debugfs_fops = {
+ .open = sw_sync_debugfs_open,
+ .release = sw_sync_debugfs_release,
+ .unlocked_ioctl = sw_sync_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c
new file mode 100644
index 000000000..101394f16
--- /dev/null
+++ b/drivers/dma-buf/sync_debug.c
@@ -0,0 +1,190 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Sync File validation framework and debug information
+ *
+ * Copyright (C) 2012 Google, Inc.
+ */
+
+#include <linux/debugfs.h>
+#include "sync_debug.h"
+
+static struct dentry *dbgfs;
+
+static LIST_HEAD(sync_timeline_list_head);
+static DEFINE_SPINLOCK(sync_timeline_list_lock);
+static LIST_HEAD(sync_file_list_head);
+static DEFINE_SPINLOCK(sync_file_list_lock);
+
+void sync_timeline_debug_add(struct sync_timeline *obj)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_add_tail(&obj->sync_timeline_list, &sync_timeline_list_head);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_timeline_debug_remove(struct sync_timeline *obj)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_timeline_list_lock, flags);
+ list_del(&obj->sync_timeline_list);
+ spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+}
+
+void sync_file_debug_add(struct sync_file *sync_file)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_file_list_lock, flags);
+ list_add_tail(&sync_file->sync_file_list, &sync_file_list_head);
+ spin_unlock_irqrestore(&sync_file_list_lock, flags);
+}
+
+void sync_file_debug_remove(struct sync_file *sync_file)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sync_file_list_lock, flags);
+ list_del(&sync_file->sync_file_list);
+ spin_unlock_irqrestore(&sync_file_list_lock, flags);
+}
+
+static const char *sync_status_str(int status)
+{
+ if (status < 0)
+ return "error";
+
+ if (status > 0)
+ return "signaled";
+
+ return "active";
+}
+
+static void sync_print_fence(struct seq_file *s,
+ struct dma_fence *fence, bool show)
+{
+ struct sync_timeline *parent = dma_fence_parent(fence);
+ int status;
+
+ status = dma_fence_get_status_locked(fence);
+
+ seq_printf(s, " %s%sfence %s",
+ show ? parent->name : "",
+ show ? "_" : "",
+ sync_status_str(status));
+
+ if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) {
+ struct timespec64 ts64 =
+ ktime_to_timespec64(fence->timestamp);
+
+ seq_printf(s, "@%lld.%09ld", (s64)ts64.tv_sec, ts64.tv_nsec);
+ }
+
+ if (fence->ops->timeline_value_str &&
+ fence->ops->fence_value_str) {
+ char value[64];
+ bool success;
+
+ fence->ops->fence_value_str(fence, value, sizeof(value));
+ success = strlen(value);
+
+ if (success) {
+ seq_printf(s, ": %s", value);
+
+ fence->ops->timeline_value_str(fence, value,
+ sizeof(value));
+
+ if (strlen(value))
+ seq_printf(s, " / %s", value);
+ }
+ }
+
+ seq_putc(s, '\n');
+}
+
+static void sync_print_obj(struct seq_file *s, struct sync_timeline *obj)
+{
+ struct list_head *pos;
+
+ seq_printf(s, "%s: %d\n", obj->name, obj->value);
+
+ spin_lock_irq(&obj->lock);
+ list_for_each(pos, &obj->pt_list) {
+ struct sync_pt *pt = container_of(pos, struct sync_pt, link);
+ sync_print_fence(s, &pt->base, false);
+ }
+ spin_unlock_irq(&obj->lock);
+}
+
+static void sync_print_sync_file(struct seq_file *s,
+ struct sync_file *sync_file)
+{
+ char buf[128];
+ int i;
+
+ seq_printf(s, "[%p] %s: %s\n", sync_file,
+ sync_file_get_name(sync_file, buf, sizeof(buf)),
+ sync_status_str(dma_fence_get_status(sync_file->fence)));
+
+ if (dma_fence_is_array(sync_file->fence)) {
+ struct dma_fence_array *array = to_dma_fence_array(sync_file->fence);
+
+ for (i = 0; i < array->num_fences; ++i)
+ sync_print_fence(s, array->fences[i], true);
+ } else {
+ sync_print_fence(s, sync_file->fence, true);
+ }
+}
+
+static int sync_info_debugfs_show(struct seq_file *s, void *unused)
+{
+ struct list_head *pos;
+
+ seq_puts(s, "objs:\n--------------\n");
+
+ spin_lock_irq(&sync_timeline_list_lock);
+ list_for_each(pos, &sync_timeline_list_head) {
+ struct sync_timeline *obj =
+ container_of(pos, struct sync_timeline,
+ sync_timeline_list);
+
+ sync_print_obj(s, obj);
+ seq_putc(s, '\n');
+ }
+ spin_unlock_irq(&sync_timeline_list_lock);
+
+ seq_puts(s, "fences:\n--------------\n");
+
+ spin_lock_irq(&sync_file_list_lock);
+ list_for_each(pos, &sync_file_list_head) {
+ struct sync_file *sync_file =
+ container_of(pos, struct sync_file, sync_file_list);
+
+ sync_print_sync_file(s, sync_file);
+ seq_putc(s, '\n');
+ }
+ spin_unlock_irq(&sync_file_list_lock);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(sync_info_debugfs);
+
+static __init int sync_debugfs_init(void)
+{
+ dbgfs = debugfs_create_dir("sync", NULL);
+
+ /*
+ * The debugfs files won't ever get removed and thus there is
+ * no need to protect them against removal races. The use of
+ * debugfs_create_file_unsafe() is actually safe here.
+ */
+ debugfs_create_file_unsafe("info", 0444, dbgfs, NULL,
+ &sync_info_debugfs_fops);
+ debugfs_create_file_unsafe("sw_sync", 0644, dbgfs, NULL,
+ &sw_sync_debugfs_fops);
+
+ return 0;
+}
+late_initcall(sync_debugfs_init);
diff --git a/drivers/dma-buf/sync_debug.h b/drivers/dma-buf/sync_debug.h
new file mode 100644
index 000000000..6176e52ba
--- /dev/null
+++ b/drivers/dma-buf/sync_debug.h
@@ -0,0 +1,72 @@
+/*
+ * Sync File validation framework and debug information
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_H
+#define _LINUX_SYNC_H
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/spinlock.h>
+#include <linux/dma-fence.h>
+
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+/**
+ * struct sync_timeline - sync object
+ * @kref: reference count on fence.
+ * @name: name of the sync_timeline. Useful for debugging
+ * @lock: lock protecting @pt_list and @value
+ * @pt_tree: rbtree of active (unsignaled/errored) sync_pts
+ * @pt_list: list of active (unsignaled/errored) sync_pts
+ * @sync_timeline_list: membership in global sync_timeline_list
+ */
+struct sync_timeline {
+ struct kref kref;
+ char name[32];
+
+ /* protected by lock */
+ u64 context;
+ int value;
+
+ struct rb_root pt_tree;
+ struct list_head pt_list;
+ spinlock_t lock;
+
+ struct list_head sync_timeline_list;
+};
+
+static inline struct sync_timeline *dma_fence_parent(struct dma_fence *fence)
+{
+ return container_of(fence->lock, struct sync_timeline, lock);
+}
+
+/**
+ * struct sync_pt - sync_pt object
+ * @base: base fence object
+ * @link: link on the sync timeline's list
+ * @node: node in the sync timeline's tree
+ */
+struct sync_pt {
+ struct dma_fence base;
+ struct list_head link;
+ struct rb_node node;
+};
+
+extern const struct file_operations sw_sync_debugfs_fops;
+
+void sync_timeline_debug_add(struct sync_timeline *obj);
+void sync_timeline_debug_remove(struct sync_timeline *obj);
+void sync_file_debug_add(struct sync_file *fence);
+void sync_file_debug_remove(struct sync_file *fence);
+
+#endif /* _LINUX_SYNC_H */
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
new file mode 100644
index 000000000..2e9a316c5
--- /dev/null
+++ b/drivers/dma-buf/sync_file.c
@@ -0,0 +1,372 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * drivers/dma-buf/sync_file.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ */
+
+#include <linux/dma-fence-unwrap.h>
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+static const struct file_operations sync_file_fops;
+
+static struct sync_file *sync_file_alloc(void)
+{
+ struct sync_file *sync_file;
+
+ sync_file = kzalloc(sizeof(*sync_file), GFP_KERNEL);
+ if (!sync_file)
+ return NULL;
+
+ sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
+ sync_file, 0);
+ if (IS_ERR(sync_file->file))
+ goto err;
+
+ init_waitqueue_head(&sync_file->wq);
+
+ INIT_LIST_HEAD(&sync_file->cb.node);
+
+ return sync_file;
+
+err:
+ kfree(sync_file);
+ return NULL;
+}
+
+static void fence_check_cb_func(struct dma_fence *f, struct dma_fence_cb *cb)
+{
+ struct sync_file *sync_file;
+
+ sync_file = container_of(cb, struct sync_file, cb);
+
+ wake_up_all(&sync_file->wq);
+}
+
+/**
+ * sync_file_create() - creates a sync file
+ * @fence: fence to add to the sync_file
+ *
+ * Creates a sync_file containing @fence. This function acquires an additional
+ * reference of @fence for the newly-created &sync_file, if it succeeds. The
+ * sync_file can be released with fput(sync_file->file). Returns the
+ * sync_file or NULL in case of error.
+ */
+struct sync_file *sync_file_create(struct dma_fence *fence)
+{
+ struct sync_file *sync_file;
+
+ sync_file = sync_file_alloc();
+ if (!sync_file)
+ return NULL;
+
+ sync_file->fence = dma_fence_get(fence);
+
+ return sync_file;
+}
+EXPORT_SYMBOL(sync_file_create);
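+
+/*
+ * Minimal export-side sketch (an assumption-laden illustration, not part of
+ * this file): a driver holding a reference to "my_fence" could hand it to
+ * userspace as a sync_file fd. Since sync_file_create() takes its own
+ * reference, the caller keeps (or drops) its reference independently.
+ * Error handling is omitted for brevity.
+ *
+ *	int fd = get_unused_fd_flags(O_CLOEXEC);
+ *	struct sync_file *sync_file = sync_file_create(my_fence);
+ *
+ *	fd_install(fd, sync_file->file);
+ *	// fd can now be returned to userspace for explicit synchronization
+ */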
+
+static struct sync_file *sync_file_fdget(int fd)
+{
+ struct file *file = fget(fd);
+
+ if (!file)
+ return NULL;
+
+ if (file->f_op != &sync_file_fops)
+ goto err;
+
+ return file->private_data;
+
+err:
+ fput(file);
+ return NULL;
+}
+
+/**
+ * sync_file_get_fence - get the fence related to the sync_file fd
+ * @fd: sync_file fd to get the fence from
+ *
+ * Ensures @fd references a valid sync_file and returns a fence that
+ * represents all fences in the sync_file. On error NULL is returned.
+ */
+struct dma_fence *sync_file_get_fence(int fd)
+{
+ struct sync_file *sync_file;
+ struct dma_fence *fence;
+
+ sync_file = sync_file_fdget(fd);
+ if (!sync_file)
+ return NULL;
+
+ fence = dma_fence_get(sync_file->fence);
+ fput(sync_file->file);
+
+ return fence;
+}
+EXPORT_SYMBOL(sync_file_get_fence);
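+
+/*
+ * Minimal import-side sketch (illustration only; "fd" is assumed to come
+ * from userspace, e.g. via a driver-specific ioctl; error handling is
+ * trimmed):
+ *
+ *	struct dma_fence *in_fence = sync_file_get_fence(fd);
+ *
+ *	if (!in_fence)
+ *		return -EINVAL;
+ *	dma_fence_wait(in_fence, true);
+ *	// or stash the fence / attach a callback instead of waiting here
+ *	dma_fence_put(in_fence);
+ */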
+
+/**
+ * sync_file_get_name - get the name of the sync_file
+ * @sync_file: sync_file to get the name from
+ * @buf: destination buffer to copy sync_file name into
+ * @len: available size of destination buffer.
+ *
+ * Each sync_file may have a name assigned either by the user (when merging
+ * sync_files together) or created from the fence it contains. In the latter
+ * case construction of the name is deferred until use, and so requires
+ * sync_file_get_name().
+ *
+ * Returns: a string representing the name.
+ */
+char *sync_file_get_name(struct sync_file *sync_file, char *buf, int len)
+{
+ if (sync_file->user_name[0]) {
+ strscpy(buf, sync_file->user_name, len);
+ } else {
+ struct dma_fence *fence = sync_file->fence;
+
+ snprintf(buf, len, "%s-%s%llu-%lld",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence),
+ fence->context,
+ fence->seqno);
+ }
+
+ return buf;
+}
+
+/**
+ * sync_file_merge() - merge two sync_files
+ * @name: name of new fence
+ * @a: sync_file a
+ * @b: sync_file b
+ *
+ * Creates a new sync_file which contains copies of all the fences in both
+ * @a and @b. @a and @b remain valid, independent sync_file. Returns the
+ * new merged sync_file or NULL in case of error.
+ */
+static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
+ struct sync_file *b)
+{
+ struct sync_file *sync_file;
+ struct dma_fence *fence;
+
+ sync_file = sync_file_alloc();
+ if (!sync_file)
+ return NULL;
+
+ fence = dma_fence_unwrap_merge(a->fence, b->fence);
+ if (!fence) {
+ fput(sync_file->file);
+ return NULL;
+ }
+ sync_file->fence = fence;
+ strscpy(sync_file->user_name, name, sizeof(sync_file->user_name));
+ return sync_file;
+}
+
+static int sync_file_release(struct inode *inode, struct file *file)
+{
+ struct sync_file *sync_file = file->private_data;
+
+ if (test_bit(POLL_ENABLED, &sync_file->flags))
+ dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
+ dma_fence_put(sync_file->fence);
+ kfree(sync_file);
+
+ return 0;
+}
+
+static __poll_t sync_file_poll(struct file *file, poll_table *wait)
+{
+ struct sync_file *sync_file = file->private_data;
+
+ poll_wait(file, &sync_file->wq, wait);
+
+ if (list_empty(&sync_file->cb.node) &&
+ !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
+ if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
+ fence_check_cb_func) < 0)
+ wake_up_all(&sync_file->wq);
+ }
+
+ return dma_fence_is_signaled(sync_file->fence) ? EPOLLIN : 0;
+}
+
+static long sync_file_ioctl_merge(struct sync_file *sync_file,
+ unsigned long arg)
+{
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ int err;
+ struct sync_file *fence2, *fence3;
+ struct sync_merge_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fd;
+ }
+
+ if (data.flags || data.pad) {
+ err = -EINVAL;
+ goto err_put_fd;
+ }
+
+ fence2 = sync_file_fdget(data.fd2);
+ if (!fence2) {
+ err = -ENOENT;
+ goto err_put_fd;
+ }
+
+ data.name[sizeof(data.name) - 1] = '\0';
+ fence3 = sync_file_merge(data.name, sync_file, fence2);
+ if (!fence3) {
+ err = -ENOMEM;
+ goto err_put_fence2;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fence3;
+ }
+
+ fd_install(fd, fence3->file);
+ fput(fence2->file);
+ return 0;
+
+err_put_fence3:
+ fput(fence3->file);
+
+err_put_fence2:
+ fput(fence2->file);
+
+err_put_fd:
+ put_unused_fd(fd);
+ return err;
+}
+
+static int sync_fill_fence_info(struct dma_fence *fence,
+ struct sync_fence_info *info)
+{
+ strscpy(info->obj_name, fence->ops->get_timeline_name(fence),
+ sizeof(info->obj_name));
+ strscpy(info->driver_name, fence->ops->get_driver_name(fence),
+ sizeof(info->driver_name));
+
+ info->status = dma_fence_get_status(fence);
+ info->timestamp_ns =
+ dma_fence_is_signaled(fence) ?
+ ktime_to_ns(dma_fence_timestamp(fence)) :
+ ktime_set(0, 0);
+
+ return info->status;
+}
+
+static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
+ unsigned long arg)
+{
+ struct sync_fence_info *fence_info = NULL;
+ struct dma_fence_unwrap iter;
+ struct sync_file_info info;
+ unsigned int num_fences;
+ struct dma_fence *fence;
+ int ret;
+ __u32 size;
+
+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+ return -EFAULT;
+
+ if (info.flags || info.pad)
+ return -EINVAL;
+
+ num_fences = 0;
+ dma_fence_unwrap_for_each(fence, &iter, sync_file->fence)
+ ++num_fences;
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+ * retrieve any sync_fence_info. If num_fences = 0 we skip filling
+ * sync_fence_info and return the actual number of fences on
+ * info->num_fences.
+ */
+ if (!info.num_fences) {
+ info.status = dma_fence_get_status(sync_file->fence);
+ goto no_fences;
+ } else {
+ info.status = 1;
+ }
+
+ if (info.num_fences < num_fences)
+ return -EINVAL;
+
+ size = num_fences * sizeof(*fence_info);
+ fence_info = kzalloc(size, GFP_KERNEL);
+ if (!fence_info)
+ return -ENOMEM;
+
+ num_fences = 0;
+ dma_fence_unwrap_for_each(fence, &iter, sync_file->fence) {
+ int status;
+
+ status = sync_fill_fence_info(fence, &fence_info[num_fences++]);
+ info.status = info.status <= 0 ? info.status : status;
+ }
+
+ if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
+ size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+no_fences:
+ sync_file_get_name(sync_file, info.name, sizeof(info.name));
+ info.num_fences = num_fences;
+
+ if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+
+out:
+ kfree(fence_info);
+
+ return ret;
+}
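+
+/*
+ * Usage sketch (illustration only) of the two-call pattern handled above,
+ * as seen from userspace: the first call with num_fences == 0 only reports
+ * the fence count, the second fills the per-fence info. Error handling is
+ * omitted and 'fd' is assumed to be a valid sync_file fd.
+ *
+ *    struct sync_file_info info = { 0 };
+ *    struct sync_fence_info *fences;
+ *
+ *    ioctl(fd, SYNC_IOC_FILE_INFO, &info);
+ *    fences = calloc(info.num_fences, sizeof(*fences));
+ *    info.sync_fence_info = (__u64)(uintptr_t)fences;
+ *    ioctl(fd, SYNC_IOC_FILE_INFO, &info);
+ */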
+
+static long sync_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sync_file *sync_file = file->private_data;
+
+ switch (cmd) {
+ case SYNC_IOC_MERGE:
+ return sync_file_ioctl_merge(sync_file, arg);
+
+ case SYNC_IOC_FILE_INFO:
+ return sync_file_ioctl_fence_info(sync_file, arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations sync_file_fops = {
+ .release = sync_file_release,
+ .poll = sync_file_poll,
+ .unlocked_ioctl = sync_file_ioctl,
+ .compat_ioctl = compat_ptr_ioctl,
+};
diff --git a/drivers/dma-buf/sync_trace.h b/drivers/dma-buf/sync_trace.h
new file mode 100644
index 000000000..06e468a21
--- /dev/null
+++ b/drivers/dma-buf/sync_trace.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_INCLUDE_PATH ../../drivers/dma-buf
+#define TRACE_SYSTEM sync_trace
+
+#if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SYNC_H
+
+#include "sync_debug.h"
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(sync_timeline,
+ TP_PROTO(struct sync_timeline *timeline),
+
+ TP_ARGS(timeline),
+
+ TP_STRUCT__entry(
+ __string(name, timeline->name)
+ __field(u32, value)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, timeline->name);
+ __entry->value = timeline->value;
+ ),
+
+ TP_printk("name=%s value=%d", __get_str(name), __entry->value)
+);
+
+#endif /* if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ) */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
new file mode 100644
index 000000000..2bcdb935a
--- /dev/null
+++ b/drivers/dma-buf/udmabuf.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cred.h>
+#include <linux/device.h>
+#include <linux/dma-buf.h>
+#include <linux/highmem.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/memfd.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/shmem_fs.h>
+#include <linux/slab.h>
+#include <linux/udmabuf.h>
+#include <linux/hugetlb.h>
+
+static int list_limit = 1024;
+module_param(list_limit, int, 0644);
+MODULE_PARM_DESC(list_limit, "udmabuf_create_list->count limit. Default is 1024.");
+
+static int size_limit_mb = 64;
+module_param(size_limit_mb, int, 0644);
+MODULE_PARM_DESC(size_limit_mb, "Max size of a dmabuf, in megabytes. Default is 64.");
+
+struct udmabuf {
+ pgoff_t pagecount;
+ struct page **pages;
+ struct sg_table *sg;
+ struct miscdevice *device;
+};
+
+static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct udmabuf *ubuf = vma->vm_private_data;
+ pgoff_t pgoff = vmf->pgoff;
+
+ if (pgoff >= ubuf->pagecount)
+ return VM_FAULT_SIGBUS;
+ vmf->page = ubuf->pages[pgoff];
+ get_page(vmf->page);
+ return 0;
+}
+
+static const struct vm_operations_struct udmabuf_vm_ops = {
+ .fault = udmabuf_vm_fault,
+};
+
+static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
+{
+ struct udmabuf *ubuf = buf->priv;
+
+ if ((vma->vm_flags & (VM_SHARED | VM_MAYSHARE)) == 0)
+ return -EINVAL;
+
+ vma->vm_ops = &udmabuf_vm_ops;
+ vma->vm_private_data = ubuf;
+ return 0;
+}
+
+static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
+ enum dma_data_direction direction)
+{
+ struct udmabuf *ubuf = buf->priv;
+ struct sg_table *sg;
+ int ret;
+
+ sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ if (!sg)
+ return ERR_PTR(-ENOMEM);
+ ret = sg_alloc_table_from_pages(sg, ubuf->pages, ubuf->pagecount,
+ 0, ubuf->pagecount << PAGE_SHIFT,
+ GFP_KERNEL);
+ if (ret < 0)
+ goto err;
+ ret = dma_map_sgtable(dev, sg, direction, 0);
+ if (ret < 0)
+ goto err;
+ return sg;
+
+err:
+ sg_free_table(sg);
+ kfree(sg);
+ return ERR_PTR(ret);
+}
+
+static void put_sg_table(struct device *dev, struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ dma_unmap_sgtable(dev, sg, direction, 0);
+ sg_free_table(sg);
+ kfree(sg);
+}
+
+static struct sg_table *map_udmabuf(struct dma_buf_attachment *at,
+ enum dma_data_direction direction)
+{
+ return get_sg_table(at->dev, at->dmabuf, direction);
+}
+
+static void unmap_udmabuf(struct dma_buf_attachment *at,
+ struct sg_table *sg,
+ enum dma_data_direction direction)
+{
+ return put_sg_table(at->dev, sg, direction);
+}
+
+static void release_udmabuf(struct dma_buf *buf)
+{
+ struct udmabuf *ubuf = buf->priv;
+ struct device *dev = ubuf->device->this_device;
+ pgoff_t pg;
+
+ if (ubuf->sg)
+ put_sg_table(dev, ubuf->sg, DMA_BIDIRECTIONAL);
+
+ for (pg = 0; pg < ubuf->pagecount; pg++)
+ put_page(ubuf->pages[pg]);
+ kfree(ubuf->pages);
+ kfree(ubuf);
+}
+
+static int begin_cpu_udmabuf(struct dma_buf *buf,
+ enum dma_data_direction direction)
+{
+ struct udmabuf *ubuf = buf->priv;
+ struct device *dev = ubuf->device->this_device;
+ int ret = 0;
+
+ if (!ubuf->sg) {
+ ubuf->sg = get_sg_table(dev, buf, direction);
+ if (IS_ERR(ubuf->sg)) {
+ ret = PTR_ERR(ubuf->sg);
+ ubuf->sg = NULL;
+ }
+ } else {
+ dma_sync_sg_for_cpu(dev, ubuf->sg->sgl, ubuf->sg->nents,
+ direction);
+ }
+
+ return ret;
+}
+
+static int end_cpu_udmabuf(struct dma_buf *buf,
+ enum dma_data_direction direction)
+{
+ struct udmabuf *ubuf = buf->priv;
+ struct device *dev = ubuf->device->this_device;
+
+ if (!ubuf->sg)
+ return -EINVAL;
+
+ dma_sync_sg_for_device(dev, ubuf->sg->sgl, ubuf->sg->nents, direction);
+ return 0;
+}
+
+static const struct dma_buf_ops udmabuf_ops = {
+ .cache_sgt_mapping = true,
+ .map_dma_buf = map_udmabuf,
+ .unmap_dma_buf = unmap_udmabuf,
+ .release = release_udmabuf,
+ .mmap = mmap_udmabuf,
+ .begin_cpu_access = begin_cpu_udmabuf,
+ .end_cpu_access = end_cpu_udmabuf,
+};
+
+#define SEALS_WANTED (F_SEAL_SHRINK)
+#define SEALS_DENIED (F_SEAL_WRITE)
+
+static long udmabuf_create(struct miscdevice *device,
+ struct udmabuf_create_list *head,
+ struct udmabuf_create_item *list)
+{
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+ struct file *memfd = NULL;
+ struct address_space *mapping = NULL;
+ struct udmabuf *ubuf;
+ struct dma_buf *buf;
+ pgoff_t pgoff, pgcnt, pgidx, pgbuf = 0, pglimit;
+ struct page *page, *hpage = NULL;
+ pgoff_t subpgoff, maxsubpgs;
+ struct hstate *hpstate;
+ int seals, ret = -EINVAL;
+ u32 i, flags;
+
+ ubuf = kzalloc(sizeof(*ubuf), GFP_KERNEL);
+ if (!ubuf)
+ return -ENOMEM;
+
+ pglimit = (size_limit_mb * 1024 * 1024) >> PAGE_SHIFT;
+ for (i = 0; i < head->count; i++) {
+ if (!IS_ALIGNED(list[i].offset, PAGE_SIZE))
+ goto err;
+ if (!IS_ALIGNED(list[i].size, PAGE_SIZE))
+ goto err;
+ ubuf->pagecount += list[i].size >> PAGE_SHIFT;
+ if (ubuf->pagecount > pglimit)
+ goto err;
+ }
+
+ if (!ubuf->pagecount)
+ goto err;
+
+ ubuf->pages = kmalloc_array(ubuf->pagecount, sizeof(*ubuf->pages),
+ GFP_KERNEL);
+ if (!ubuf->pages) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ pgbuf = 0;
+ for (i = 0; i < head->count; i++) {
+ ret = -EBADFD;
+ memfd = fget(list[i].memfd);
+ if (!memfd)
+ goto err;
+ mapping = memfd->f_mapping;
+ if (!shmem_mapping(mapping) && !is_file_hugepages(memfd))
+ goto err;
+ seals = memfd_fcntl(memfd, F_GET_SEALS, 0);
+ if (seals == -EINVAL)
+ goto err;
+ ret = -EINVAL;
+ if ((seals & SEALS_WANTED) != SEALS_WANTED ||
+ (seals & SEALS_DENIED) != 0)
+ goto err;
+ pgoff = list[i].offset >> PAGE_SHIFT;
+ pgcnt = list[i].size >> PAGE_SHIFT;
+ if (is_file_hugepages(memfd)) {
+ hpstate = hstate_file(memfd);
+ pgoff = list[i].offset >> huge_page_shift(hpstate);
+ subpgoff = (list[i].offset &
+ ~huge_page_mask(hpstate)) >> PAGE_SHIFT;
+ maxsubpgs = huge_page_size(hpstate) >> PAGE_SHIFT;
+ }
+ for (pgidx = 0; pgidx < pgcnt; pgidx++) {
+ if (is_file_hugepages(memfd)) {
+ if (!hpage) {
+ hpage = find_get_page_flags(mapping, pgoff,
+ FGP_ACCESSED);
+ if (!hpage) {
+ ret = -EINVAL;
+ goto err;
+ }
+ }
+ page = hpage + subpgoff;
+ get_page(page);
+ subpgoff++;
+ if (subpgoff == maxsubpgs) {
+ put_page(hpage);
+ hpage = NULL;
+ subpgoff = 0;
+ pgoff++;
+ }
+ } else {
+ page = shmem_read_mapping_page(mapping,
+ pgoff + pgidx);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto err;
+ }
+ }
+ ubuf->pages[pgbuf++] = page;
+ }
+ fput(memfd);
+ memfd = NULL;
+ if (hpage) {
+ put_page(hpage);
+ hpage = NULL;
+ }
+ }
+
+ exp_info.ops = &udmabuf_ops;
+ exp_info.size = ubuf->pagecount << PAGE_SHIFT;
+ exp_info.priv = ubuf;
+ exp_info.flags = O_RDWR;
+
+ ubuf->device = device;
+ buf = dma_buf_export(&exp_info);
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto err;
+ }
+
+ flags = 0;
+ if (head->flags & UDMABUF_FLAGS_CLOEXEC)
+ flags |= O_CLOEXEC;
+ return dma_buf_fd(buf, flags);
+
+err:
+ while (pgbuf > 0)
+ put_page(ubuf->pages[--pgbuf]);
+ if (memfd)
+ fput(memfd);
+ kfree(ubuf->pages);
+ kfree(ubuf);
+ return ret;
+}
+
+static long udmabuf_ioctl_create(struct file *filp, unsigned long arg)
+{
+ struct udmabuf_create create;
+ struct udmabuf_create_list head;
+ struct udmabuf_create_item list;
+
+ if (copy_from_user(&create, (void __user *)arg,
+ sizeof(create)))
+ return -EFAULT;
+
+ head.flags = create.flags;
+ head.count = 1;
+ list.memfd = create.memfd;
+ list.offset = create.offset;
+ list.size = create.size;
+
+ return udmabuf_create(filp->private_data, &head, &list);
+}
+
+static long udmabuf_ioctl_create_list(struct file *filp, unsigned long arg)
+{
+ struct udmabuf_create_list head;
+ struct udmabuf_create_item *list;
+ int ret = -EINVAL;
+ u32 lsize;
+
+ if (copy_from_user(&head, (void __user *)arg, sizeof(head)))
+ return -EFAULT;
+ if (head.count > list_limit)
+ return -EINVAL;
+ lsize = sizeof(struct udmabuf_create_item) * head.count;
+ list = memdup_user((void __user *)(arg + sizeof(head)), lsize);
+ if (IS_ERR(list))
+ return PTR_ERR(list);
+
+ ret = udmabuf_create(filp->private_data, &head, list);
+ kfree(list);
+ return ret;
+}
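+
+/*
+ * Usage sketch (illustration only): userspace typically builds a sealed
+ * memfd and turns it into a dma-buf via UDMABUF_CREATE on /dev/udmabuf;
+ * 'size' is assumed to be page-aligned and error handling is omitted.
+ *
+ *    int memfd = memfd_create("fb", MFD_ALLOW_SEALING);
+ *    ftruncate(memfd, size);
+ *    fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK);
+ *
+ *    int devfd = open("/dev/udmabuf", O_RDWR);
+ *    struct udmabuf_create create = {
+ *        .memfd  = memfd,
+ *        .offset = 0,
+ *        .size   = size,
+ *    };
+ *    int dmabuf_fd = ioctl(devfd, UDMABUF_CREATE, &create);
+ */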
+
+static long udmabuf_ioctl(struct file *filp, unsigned int ioctl,
+ unsigned long arg)
+{
+ long ret;
+
+ switch (ioctl) {
+ case UDMABUF_CREATE:
+ ret = udmabuf_ioctl_create(filp, arg);
+ break;
+ case UDMABUF_CREATE_LIST:
+ ret = udmabuf_ioctl_create_list(filp, arg);
+ break;
+ default:
+ ret = -ENOTTY;
+ break;
+ }
+ return ret;
+}
+
+static const struct file_operations udmabuf_fops = {
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = udmabuf_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = udmabuf_ioctl,
+#endif
+};
+
+static struct miscdevice udmabuf_misc = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "udmabuf",
+ .fops = &udmabuf_fops,
+};
+
+static int __init udmabuf_dev_init(void)
+{
+ int ret;
+
+ ret = misc_register(&udmabuf_misc);
+ if (ret < 0) {
+ pr_err("Could not initialize udmabuf device\n");
+ return ret;
+ }
+
+ ret = dma_coerce_mask_and_coherent(udmabuf_misc.this_device,
+ DMA_BIT_MASK(64));
+ if (ret < 0) {
+ pr_err("Could not setup DMA mask for udmabuf device\n");
+ misc_deregister(&udmabuf_misc);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void __exit udmabuf_dev_exit(void)
+{
+ misc_deregister(&udmabuf_misc);
+}
+
+module_init(udmabuf_dev_init)
+module_exit(udmabuf_dev_exit)
+
+MODULE_AUTHOR("Gerd Hoffmann <kraxel@redhat.com>");
+MODULE_LICENSE("GPL v2");