author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/media/mc
parent    Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/media/mc')
-rw-r--r--  drivers/media/mc/Kconfig            |   20
-rw-r--r--  drivers/media/mc/Makefile           |   10
-rw-r--r--  drivers/media/mc/mc-dev-allocator.c |  135
-rw-r--r--  drivers/media/mc/mc-device.c        |  889
-rw-r--r--  drivers/media/mc/mc-devnode.c       |  328
-rw-r--r--  drivers/media/mc/mc-entity.c        | 1611
-rw-r--r--  drivers/media/mc/mc-request.c       |  507
7 files changed, 3500 insertions, 0 deletions
diff --git a/drivers/media/mc/Kconfig b/drivers/media/mc/Kconfig
new file mode 100644
index 0000000000..375b096129
--- /dev/null
+++ b/drivers/media/mc/Kconfig
@@ -0,0 +1,20 @@
+# SPDX-License-Identifier: GPL-2.0
+
+#
+# Media controller
+#
+
+config MEDIA_CONTROLLER_DVB
+ bool "Enable Media controller for DVB (EXPERIMENTAL)"
+ depends on MEDIA_CONTROLLER && DVB_CORE
+ help
+ Enable the media controller API support for DVB.
+
+ This is currently experimental.
+
+config MEDIA_CONTROLLER_REQUEST_API
+ bool
+ depends on MEDIA_CONTROLLER
+ help
+ This option enables the Request API for the Media controller and V4L2
+ interfaces. It is currently needed by a few stateless codec drivers.
diff --git a/drivers/media/mc/Makefile b/drivers/media/mc/Makefile
new file mode 100644
index 0000000000..2b7af42ba5
--- /dev/null
+++ b/drivers/media/mc/Makefile
@@ -0,0 +1,10 @@
+# SPDX-License-Identifier: GPL-2.0
+
+mc-objs := mc-device.o mc-devnode.o mc-entity.o \
+ mc-request.o
+
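+# mc-dev-allocator is only needed to share a media_device between the
+# drivers bound to a single (USB) device, so build it only with USB.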
+ifneq ($(CONFIG_USB),)
+ mc-objs += mc-dev-allocator.o
+endif
+
+obj-$(CONFIG_MEDIA_SUPPORT) += mc.o
diff --git a/drivers/media/mc/mc-dev-allocator.c b/drivers/media/mc/mc-dev-allocator.c
new file mode 100644
index 0000000000..ae17887dec
--- /dev/null
+++ b/drivers/media/mc/mc-dev-allocator.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * media-dev-allocator.c - Media Controller Device Allocator API
+ *
+ * Copyright (c) 2019 Shuah Khan <shuah@kernel.org>
+ *
+ * Credits: Suggested by Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+/*
+ * This file adds a global refcounted Media Controller Device Instance API.
+ * A system-wide global media device list is managed and each media device
+ * includes a kref count. The last put on a media device releases the media
+ * device instance.
+ */
+
+#include <linux/kref.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include <media/media-device.h>
+#include <media/media-dev-allocator.h>
+
+static LIST_HEAD(media_device_list);
+static DEFINE_MUTEX(media_device_lock);
+
+struct media_device_instance {
+ struct media_device mdev;
+ struct module *owner;
+ struct list_head list;
+ struct kref refcount;
+};
+
+static inline struct media_device_instance *
+to_media_device_instance(struct media_device *mdev)
+{
+ return container_of(mdev, struct media_device_instance, mdev);
+}
+
+static void media_device_instance_release(struct kref *kref)
+{
+ struct media_device_instance *mdi =
+ container_of(kref, struct media_device_instance, refcount);
+
+ dev_dbg(mdi->mdev.dev, "%s: releasing Media Device\n", __func__);
+
+ mutex_lock(&media_device_lock);
+
+ media_device_unregister(&mdi->mdev);
+ media_device_cleanup(&mdi->mdev);
+
+ list_del(&mdi->list);
+ mutex_unlock(&media_device_lock);
+
+ kfree(mdi);
+}
+
+/* Callers should hold media_device_lock when calling this function */
+static struct media_device *__media_device_get(struct device *dev,
+ const char *module_name,
+ struct module *owner)
+{
+ struct media_device_instance *mdi;
+
+ list_for_each_entry(mdi, &media_device_list, list) {
+ if (mdi->mdev.dev != dev)
+ continue;
+
+ kref_get(&mdi->refcount);
+
+ /* get module reference for the media_device owner */
+ if (owner != mdi->owner && !try_module_get(mdi->owner))
+ dev_err(dev,
+ "%s: module %s get owner reference error\n",
+ __func__, module_name);
+ else
+ dev_dbg(dev, "%s: module %s got owner reference\n",
+ __func__, module_name);
+ return &mdi->mdev;
+ }
+
+ mdi = kzalloc(sizeof(*mdi), GFP_KERNEL);
+ if (!mdi)
+ return NULL;
+
+ mdi->owner = owner;
+ kref_init(&mdi->refcount);
+ list_add_tail(&mdi->list, &media_device_list);
+
+ dev_dbg(dev, "%s: Allocated media device for owner %s\n",
+ __func__, module_name);
+ return &mdi->mdev;
+}
+
+struct media_device *media_device_usb_allocate(struct usb_device *udev,
+ const char *module_name,
+ struct module *owner)
+{
+ struct media_device *mdev;
+
+ mutex_lock(&media_device_lock);
+ mdev = __media_device_get(&udev->dev, module_name, owner);
+ if (!mdev) {
+ mutex_unlock(&media_device_lock);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* check if media device is already initialized */
+ if (!mdev->dev)
+ __media_device_usb_init(mdev, udev, udev->product,
+ module_name);
+ mutex_unlock(&media_device_lock);
+ return mdev;
+}
+EXPORT_SYMBOL_GPL(media_device_usb_allocate);
+
+void media_device_delete(struct media_device *mdev, const char *module_name,
+ struct module *owner)
+{
+ struct media_device_instance *mdi = to_media_device_instance(mdev);
+
+ mutex_lock(&media_device_lock);
+ /* put module reference for the media_device owner */
+ if (mdi->owner != owner) {
+ module_put(mdi->owner);
+ dev_dbg(mdi->mdev.dev,
+ "%s: module %s put owner module reference\n",
+ __func__, module_name);
+ }
+ mutex_unlock(&media_device_lock);
+ kref_put(&mdi->refcount, media_device_instance_release);
+}
+EXPORT_SYMBOL_GPL(media_device_delete);
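
A minimal sketch of how two drivers bound to the same USB device would share
a media device through this allocator; the probe/disconnect wiring below is
hypothetical, only media_device_usb_allocate() and media_device_delete() come
from this file:

	static int sample_probe(struct usb_interface *intf,
				const struct usb_device_id *id)
	{
		struct usb_device *udev = interface_to_usbdev(intf);
		struct media_device *mdev;

		/* Takes a reference on the instance already allocated for
		 * udev, if any; otherwise allocates a fresh one. */
		mdev = media_device_usb_allocate(udev, KBUILD_MODNAME,
						 THIS_MODULE);
		if (IS_ERR(mdev))
			return PTR_ERR(mdev);

		usb_set_intfdata(intf, mdev);
		/* ... register entities, then media_device_register() ... */
		return 0;
	}

	static void sample_disconnect(struct usb_interface *intf)
	{
		struct media_device *mdev = usb_get_intfdata(intf);

		/* Drop this driver's reference; the last put unregisters,
		 * cleans up and frees the shared media device. */
		media_device_delete(mdev, KBUILD_MODNAME, THIS_MODULE);
	}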
diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c
new file mode 100644
index 0000000000..8cee956e38
--- /dev/null
+++ b/drivers/media/mc/mc-device.c
@@ -0,0 +1,889 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Media device
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ */
+
+#include <linux/compat.h>
+#include <linux/export.h>
+#include <linux/idr.h>
+#include <linux/ioctl.h>
+#include <linux/media.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/usb.h>
+#include <linux/version.h>
+
+#include <media/media-device.h>
+#include <media/media-devnode.h>
+#include <media/media-entity.h>
+#include <media/media-request.h>
+
+#ifdef CONFIG_MEDIA_CONTROLLER
+
+/*
+ * Legacy defines from linux/media.h. This is the only place we need this
+ * so we just define it here. The media.h header doesn't expose it to the
+ * kernel to prevent it from being used by drivers, but here (and only here!)
+ * we need it to handle the legacy behavior.
+ */
+#define MEDIA_ENT_SUBTYPE_MASK 0x0000ffff
+#define MEDIA_ENT_T_DEVNODE_UNKNOWN (MEDIA_ENT_F_OLD_BASE | \
+ MEDIA_ENT_SUBTYPE_MASK)
+
+/* -----------------------------------------------------------------------------
+ * Userspace API
+ */
+
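+/*
+ * Convert a 64-bit ioctl argument to a user-space pointer. The
+ * intermediate uintptr_t cast avoids a size-mismatch warning on
+ * 32-bit architectures.
+ */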
+static inline void __user *media_get_uptr(__u64 arg)
+{
+ return (void __user *)(uintptr_t)arg;
+}
+
+static int media_device_open(struct file *filp)
+{
+ return 0;
+}
+
+static int media_device_close(struct file *filp)
+{
+ return 0;
+}
+
+static long media_device_get_info(struct media_device *dev, void *arg)
+{
+ struct media_device_info *info = arg;
+
+ memset(info, 0, sizeof(*info));
+
+ if (dev->driver_name[0])
+ strscpy(info->driver, dev->driver_name, sizeof(info->driver));
+ else
+ strscpy(info->driver, dev->dev->driver->name,
+ sizeof(info->driver));
+
+ strscpy(info->model, dev->model, sizeof(info->model));
+ strscpy(info->serial, dev->serial, sizeof(info->serial));
+ strscpy(info->bus_info, dev->bus_info, sizeof(info->bus_info));
+
+ info->media_version = LINUX_VERSION_CODE;
+ info->driver_version = info->media_version;
+ info->hw_revision = dev->hw_revision;
+
+ return 0;
+}
+
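+/*
+ * Look up an entity by ID. If MEDIA_ENT_ID_FLAG_NEXT is set in @id,
+ * return the first entity in registration order whose ID is higher
+ * than the given one instead; this is how MEDIA_IOC_ENUM_ENTITIES
+ * iterates over all entities.
+ */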
+static struct media_entity *find_entity(struct media_device *mdev, u32 id)
+{
+ struct media_entity *entity;
+ int next = id & MEDIA_ENT_ID_FLAG_NEXT;
+
+ id &= ~MEDIA_ENT_ID_FLAG_NEXT;
+
+ media_device_for_each_entity(entity, mdev) {
+ if (((media_entity_id(entity) == id) && !next) ||
+ ((media_entity_id(entity) > id) && next)) {
+ return entity;
+ }
+ }
+
+ return NULL;
+}
+
+static long media_device_enum_entities(struct media_device *mdev, void *arg)
+{
+ struct media_entity_desc *entd = arg;
+ struct media_entity *ent;
+
+ ent = find_entity(mdev, entd->id);
+ if (ent == NULL)
+ return -EINVAL;
+
+ memset(entd, 0, sizeof(*entd));
+
+ entd->id = media_entity_id(ent);
+ if (ent->name)
+ strscpy(entd->name, ent->name, sizeof(entd->name));
+ entd->type = ent->function;
+ entd->revision = 0; /* Unused */
+ entd->flags = ent->flags;
+ entd->group_id = 0; /* Unused */
+ entd->pads = ent->num_pads;
+ entd->links = ent->num_links - ent->num_backlinks;
+
+	/*
+	 * Workaround for a bug in media-ctl <= v1.10 that makes it do the
+	 * wrong thing if the entity function doesn't belong to either the
+	 * MEDIA_ENT_F_OLD_BASE or MEDIA_ENT_F_OLD_SUBDEV_BASE ranges.
+	 *
+	 * Non-subdevice entities are expected to be in the
+	 * MEDIA_ENT_F_OLD_BASE range; otherwise media-ctl silently ignores
+	 * them when printing the graphviz diagram. So, map them into the
+	 * old devnode range.
+	 */
+ if (ent->function < MEDIA_ENT_F_OLD_BASE ||
+ ent->function > MEDIA_ENT_F_TUNER) {
+ if (is_media_entity_v4l2_subdev(ent))
+ entd->type = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN;
+ else if (ent->function != MEDIA_ENT_F_IO_V4L)
+ entd->type = MEDIA_ENT_T_DEVNODE_UNKNOWN;
+ }
+
+ memcpy(&entd->raw, &ent->info, sizeof(ent->info));
+
+ return 0;
+}
+
+static void media_device_kpad_to_upad(const struct media_pad *kpad,
+ struct media_pad_desc *upad)
+{
+ upad->entity = media_entity_id(kpad->entity);
+ upad->index = kpad->index;
+ upad->flags = kpad->flags;
+}
+
+static long media_device_enum_links(struct media_device *mdev, void *arg)
+{
+ struct media_links_enum *links = arg;
+ struct media_entity *entity;
+
+ entity = find_entity(mdev, links->entity);
+ if (entity == NULL)
+ return -EINVAL;
+
+ if (links->pads) {
+ unsigned int p;
+
+ for (p = 0; p < entity->num_pads; p++) {
+ struct media_pad_desc pad;
+
+ memset(&pad, 0, sizeof(pad));
+ media_device_kpad_to_upad(&entity->pads[p], &pad);
+ if (copy_to_user(&links->pads[p], &pad, sizeof(pad)))
+ return -EFAULT;
+ }
+ }
+
+ if (links->links) {
+ struct media_link *link;
+ struct media_link_desc __user *ulink_desc = links->links;
+
+ list_for_each_entry(link, &entity->links, list) {
+ struct media_link_desc klink_desc;
+
+ /* Ignore backlinks. */
+ if (link->source->entity != entity)
+ continue;
+ memset(&klink_desc, 0, sizeof(klink_desc));
+ media_device_kpad_to_upad(link->source,
+ &klink_desc.source);
+ media_device_kpad_to_upad(link->sink,
+ &klink_desc.sink);
+ klink_desc.flags = link->flags;
+ if (copy_to_user(ulink_desc, &klink_desc,
+ sizeof(*ulink_desc)))
+ return -EFAULT;
+ ulink_desc++;
+ }
+ }
+ memset(links->reserved, 0, sizeof(links->reserved));
+
+ return 0;
+}
+
+static long media_device_setup_link(struct media_device *mdev, void *arg)
+{
+ struct media_link_desc *linkd = arg;
+ struct media_link *link = NULL;
+ struct media_entity *source;
+ struct media_entity *sink;
+
+	/* Find the source and sink entities and link. */
+ source = find_entity(mdev, linkd->source.entity);
+ sink = find_entity(mdev, linkd->sink.entity);
+
+ if (source == NULL || sink == NULL)
+ return -EINVAL;
+
+ if (linkd->source.index >= source->num_pads ||
+ linkd->sink.index >= sink->num_pads)
+ return -EINVAL;
+
+ link = media_entity_find_link(&source->pads[linkd->source.index],
+ &sink->pads[linkd->sink.index]);
+ if (link == NULL)
+ return -EINVAL;
+
+ memset(linkd->reserved, 0, sizeof(linkd->reserved));
+
+ /* Setup the link on both entities. */
+ return __media_entity_setup_link(link, linkd->flags);
+}
+
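+/*
+ * MEDIA_IOC_G_TOPOLOGY is typically called twice: first with the ptr_*
+ * fields set to zero to query the object counts, then with buffers
+ * large enough to hold them. The counts are always updated, and
+ * -ENOSPC is returned when a provided buffer is too small.
+ */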
+static long media_device_get_topology(struct media_device *mdev, void *arg)
+{
+ struct media_v2_topology *topo = arg;
+ struct media_entity *entity;
+ struct media_interface *intf;
+ struct media_pad *pad;
+ struct media_link *link;
+ struct media_v2_entity kentity, __user *uentity;
+ struct media_v2_interface kintf, __user *uintf;
+ struct media_v2_pad kpad, __user *upad;
+ struct media_v2_link klink, __user *ulink;
+ unsigned int i;
+ int ret = 0;
+
+ topo->topology_version = mdev->topology_version;
+
+ /* Get entities and number of entities */
+ i = 0;
+ uentity = media_get_uptr(topo->ptr_entities);
+ media_device_for_each_entity(entity, mdev) {
+ i++;
+ if (ret || !uentity)
+ continue;
+
+ if (i > topo->num_entities) {
+ ret = -ENOSPC;
+ continue;
+ }
+
+		/* Copy the entity fields to the userspace struct */
+ memset(&kentity, 0, sizeof(kentity));
+ kentity.id = entity->graph_obj.id;
+ kentity.function = entity->function;
+ kentity.flags = entity->flags;
+ strscpy(kentity.name, entity->name,
+ sizeof(kentity.name));
+
+ if (copy_to_user(uentity, &kentity, sizeof(kentity)))
+ ret = -EFAULT;
+ uentity++;
+ }
+ topo->num_entities = i;
+ topo->reserved1 = 0;
+
+ /* Get interfaces and number of interfaces */
+ i = 0;
+ uintf = media_get_uptr(topo->ptr_interfaces);
+ media_device_for_each_intf(intf, mdev) {
+ i++;
+ if (ret || !uintf)
+ continue;
+
+ if (i > topo->num_interfaces) {
+ ret = -ENOSPC;
+ continue;
+ }
+
+ memset(&kintf, 0, sizeof(kintf));
+
+ /* Copy intf fields to userspace struct */
+ kintf.id = intf->graph_obj.id;
+ kintf.intf_type = intf->type;
+ kintf.flags = intf->flags;
+
+ if (media_type(&intf->graph_obj) == MEDIA_GRAPH_INTF_DEVNODE) {
+ struct media_intf_devnode *devnode;
+
+ devnode = intf_to_devnode(intf);
+
+ kintf.devnode.major = devnode->major;
+ kintf.devnode.minor = devnode->minor;
+ }
+
+ if (copy_to_user(uintf, &kintf, sizeof(kintf)))
+ ret = -EFAULT;
+ uintf++;
+ }
+ topo->num_interfaces = i;
+ topo->reserved2 = 0;
+
+ /* Get pads and number of pads */
+ i = 0;
+ upad = media_get_uptr(topo->ptr_pads);
+ media_device_for_each_pad(pad, mdev) {
+ i++;
+ if (ret || !upad)
+ continue;
+
+ if (i > topo->num_pads) {
+ ret = -ENOSPC;
+ continue;
+ }
+
+ memset(&kpad, 0, sizeof(kpad));
+
+ /* Copy pad fields to userspace struct */
+ kpad.id = pad->graph_obj.id;
+ kpad.entity_id = pad->entity->graph_obj.id;
+ kpad.flags = pad->flags;
+ kpad.index = pad->index;
+
+ if (copy_to_user(upad, &kpad, sizeof(kpad)))
+ ret = -EFAULT;
+ upad++;
+ }
+ topo->num_pads = i;
+ topo->reserved3 = 0;
+
+ /* Get links and number of links */
+ i = 0;
+ ulink = media_get_uptr(topo->ptr_links);
+ media_device_for_each_link(link, mdev) {
+ if (link->is_backlink)
+ continue;
+
+ i++;
+
+ if (ret || !ulink)
+ continue;
+
+ if (i > topo->num_links) {
+ ret = -ENOSPC;
+ continue;
+ }
+
+ memset(&klink, 0, sizeof(klink));
+
+ /* Copy link fields to userspace struct */
+ klink.id = link->graph_obj.id;
+ klink.source_id = link->gobj0->id;
+ klink.sink_id = link->gobj1->id;
+ klink.flags = link->flags;
+
+ if (copy_to_user(ulink, &klink, sizeof(klink)))
+ ret = -EFAULT;
+ ulink++;
+ }
+ topo->num_links = i;
+ topo->reserved4 = 0;
+
+ return ret;
+}
+
+static long media_device_request_alloc(struct media_device *mdev, void *arg)
+{
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
+ int *alloc_fd = arg;
+
+ if (!mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue)
+ return -ENOTTY;
+
+ return media_request_alloc(mdev, alloc_fd);
+#else
+ return -ENOTTY;
+#endif
+}
+
+static long copy_arg_from_user(void *karg, void __user *uarg, unsigned int cmd)
+{
+ if ((_IOC_DIR(cmd) & _IOC_WRITE) &&
+ copy_from_user(karg, uarg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long copy_arg_to_user(void __user *uarg, void *karg, unsigned int cmd)
+{
+ if ((_IOC_DIR(cmd) & _IOC_READ) &&
+ copy_to_user(uarg, karg, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ return 0;
+}
+
+/* When set, the ioctl is executed with the graph mutex held */
+#define MEDIA_IOC_FL_GRAPH_MUTEX BIT(0)
+
+#define MEDIA_IOC_ARG(__cmd, func, fl, from_user, to_user) \
+ [_IOC_NR(MEDIA_IOC_##__cmd)] = { \
+ .cmd = MEDIA_IOC_##__cmd, \
+ .fn = func, \
+ .flags = fl, \
+ .arg_from_user = from_user, \
+ .arg_to_user = to_user, \
+ }
+
+#define MEDIA_IOC(__cmd, func, fl) \
+ MEDIA_IOC_ARG(__cmd, func, fl, copy_arg_from_user, copy_arg_to_user)
+
+/* the table is indexed by _IOC_NR(cmd) */
+struct media_ioctl_info {
+ unsigned int cmd;
+ unsigned short flags;
+ long (*fn)(struct media_device *dev, void *arg);
+ long (*arg_from_user)(void *karg, void __user *uarg, unsigned int cmd);
+ long (*arg_to_user)(void __user *uarg, void *karg, unsigned int cmd);
+};
+
+static const struct media_ioctl_info ioctl_info[] = {
+ MEDIA_IOC(DEVICE_INFO, media_device_get_info, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(ENUM_ENTITIES, media_device_enum_entities, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(ENUM_LINKS, media_device_enum_links, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(SETUP_LINK, media_device_setup_link, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(G_TOPOLOGY, media_device_get_topology, MEDIA_IOC_FL_GRAPH_MUTEX),
+ MEDIA_IOC(REQUEST_ALLOC, media_device_request_alloc, 0),
+};
+
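+/*
+ * Common ioctl handler: arguments up to 256 bytes are copied through an
+ * on-stack buffer, larger ones through a temporary heap allocation, and
+ * the command handler optionally runs under the graph mutex.
+ */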
+static long media_device_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long __arg)
+{
+ struct media_devnode *devnode = media_devnode_data(filp);
+ struct media_device *dev = devnode->media_dev;
+ const struct media_ioctl_info *info;
+ void __user *arg = (void __user *)__arg;
+ char __karg[256], *karg = __karg;
+ long ret;
+
+ if (_IOC_NR(cmd) >= ARRAY_SIZE(ioctl_info)
+ || ioctl_info[_IOC_NR(cmd)].cmd != cmd)
+ return -ENOIOCTLCMD;
+
+ info = &ioctl_info[_IOC_NR(cmd)];
+
+ if (_IOC_SIZE(info->cmd) > sizeof(__karg)) {
+ karg = kmalloc(_IOC_SIZE(info->cmd), GFP_KERNEL);
+ if (!karg)
+ return -ENOMEM;
+ }
+
+ if (info->arg_from_user) {
+ ret = info->arg_from_user(karg, arg, cmd);
+ if (ret)
+ goto out_free;
+ }
+
+ if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX)
+ mutex_lock(&dev->graph_mutex);
+
+ ret = info->fn(dev, karg);
+
+ if (info->flags & MEDIA_IOC_FL_GRAPH_MUTEX)
+ mutex_unlock(&dev->graph_mutex);
+
+ if (!ret && info->arg_to_user)
+ ret = info->arg_to_user(arg, karg, cmd);
+
+out_free:
+ if (karg != __karg)
+ kfree(karg);
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+struct media_links_enum32 {
+ __u32 entity;
+ compat_uptr_t pads; /* struct media_pad_desc * */
+ compat_uptr_t links; /* struct media_link_desc * */
+ __u32 reserved[4];
+};
+
+static long media_device_enum_links32(struct media_device *mdev,
+ struct media_links_enum32 __user *ulinks)
+{
+ struct media_links_enum links;
+ compat_uptr_t pads_ptr, links_ptr;
+ int ret;
+
+ memset(&links, 0, sizeof(links));
+
+ if (get_user(links.entity, &ulinks->entity)
+ || get_user(pads_ptr, &ulinks->pads)
+ || get_user(links_ptr, &ulinks->links))
+ return -EFAULT;
+
+ links.pads = compat_ptr(pads_ptr);
+ links.links = compat_ptr(links_ptr);
+
+ ret = media_device_enum_links(mdev, &links);
+ if (ret)
+ return ret;
+
+ if (copy_to_user(ulinks->reserved, links.reserved,
+ sizeof(ulinks->reserved)))
+ return -EFAULT;
+ return 0;
+}
+
+#define MEDIA_IOC_ENUM_LINKS32 _IOWR('|', 0x02, struct media_links_enum32)
+
+static long media_device_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct media_devnode *devnode = media_devnode_data(filp);
+ struct media_device *dev = devnode->media_dev;
+ long ret;
+
+ switch (cmd) {
+ case MEDIA_IOC_ENUM_LINKS32:
+ mutex_lock(&dev->graph_mutex);
+ ret = media_device_enum_links32(dev,
+ (struct media_links_enum32 __user *)arg);
+ mutex_unlock(&dev->graph_mutex);
+ break;
+
+ default:
+ return media_device_ioctl(filp, cmd, arg);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_COMPAT */
+
+static const struct media_file_operations media_device_fops = {
+ .owner = THIS_MODULE,
+ .open = media_device_open,
+ .ioctl = media_device_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = media_device_compat_ioctl,
+#endif /* CONFIG_COMPAT */
+ .release = media_device_close,
+};
+
+/* -----------------------------------------------------------------------------
+ * sysfs
+ */
+
+static ssize_t model_show(struct device *cd,
+ struct device_attribute *attr, char *buf)
+{
+ struct media_devnode *devnode = to_media_devnode(cd);
+ struct media_device *mdev = devnode->media_dev;
+
+ return sprintf(buf, "%.*s\n", (int)sizeof(mdev->model), mdev->model);
+}
+
+static DEVICE_ATTR_RO(model);
+
+/* -----------------------------------------------------------------------------
+ * Registration/unregistration
+ */
+
+static void media_device_release(struct media_devnode *devnode)
+{
+ dev_dbg(devnode->parent, "Media device released\n");
+}
+
+static void __media_device_unregister_entity(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_link *link, *tmp;
+ struct media_interface *intf;
+ struct media_pad *iter;
+
+ ida_free(&mdev->entity_internal_idx, entity->internal_idx);
+
+ /* Remove all interface links pointing to this entity */
+ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
+ list_for_each_entry_safe(link, tmp, &intf->links, list) {
+ if (link->entity == entity)
+ __media_remove_intf_link(link);
+ }
+ }
+
+ /* Remove all data links that belong to this entity */
+ __media_entity_remove_links(entity);
+
+ /* Remove all pads that belong to this entity */
+ media_entity_for_each_pad(entity, iter)
+ media_gobj_destroy(&iter->graph_obj);
+
+ /* Remove the entity */
+ media_gobj_destroy(&entity->graph_obj);
+
+	/* TODO: invoke entity_notify callbacks to handle entity removal? */
+}
+
+int __must_check media_device_register_entity(struct media_device *mdev,
+ struct media_entity *entity)
+{
+ struct media_entity_notify *notify, *next;
+ struct media_pad *iter;
+ int ret;
+
+ if (entity->function == MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN ||
+ entity->function == MEDIA_ENT_F_UNKNOWN)
+ dev_warn(mdev->dev,
+ "Entity type for entity %s was not initialized!\n",
+ entity->name);
+
+ /* Warn if we apparently re-register an entity */
+ WARN_ON(entity->graph_obj.mdev != NULL);
+ entity->graph_obj.mdev = mdev;
+ INIT_LIST_HEAD(&entity->links);
+ entity->num_links = 0;
+ entity->num_backlinks = 0;
+
+ ret = ida_alloc_min(&mdev->entity_internal_idx, 1, GFP_KERNEL);
+ if (ret < 0)
+ return ret;
+ entity->internal_idx = ret;
+
+ mutex_lock(&mdev->graph_mutex);
+ mdev->entity_internal_idx_max =
+ max(mdev->entity_internal_idx_max, entity->internal_idx);
+
+ /* Initialize media_gobj embedded at the entity */
+ media_gobj_create(mdev, MEDIA_GRAPH_ENTITY, &entity->graph_obj);
+
+ /* Initialize objects at the pads */
+ media_entity_for_each_pad(entity, iter)
+ media_gobj_create(mdev, MEDIA_GRAPH_PAD, &iter->graph_obj);
+
+ /* invoke entity_notify callbacks */
+ list_for_each_entry_safe(notify, next, &mdev->entity_notify, list)
+ notify->notify(entity, notify->notify_data);
+
+ if (mdev->entity_internal_idx_max
+ >= mdev->pm_count_walk.ent_enum.idx_max) {
+ struct media_graph new = { .top = 0 };
+
+ /*
+ * Initialise the new graph walk before cleaning up
+ * the old one in order not to spoil the graph walk
+ * object of the media device if graph walk init fails.
+ */
+ ret = media_graph_walk_init(&new, mdev);
+ if (ret) {
+ __media_device_unregister_entity(entity);
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+ media_graph_walk_cleanup(&mdev->pm_count_walk);
+ mdev->pm_count_walk = new;
+ }
+ mutex_unlock(&mdev->graph_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(media_device_register_entity);
+
+void media_device_unregister_entity(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+
+ if (mdev == NULL)
+ return;
+
+ mutex_lock(&mdev->graph_mutex);
+ __media_device_unregister_entity(entity);
+ mutex_unlock(&mdev->graph_mutex);
+}
+EXPORT_SYMBOL_GPL(media_device_unregister_entity);
+
+void media_device_init(struct media_device *mdev)
+{
+ INIT_LIST_HEAD(&mdev->entities);
+ INIT_LIST_HEAD(&mdev->interfaces);
+ INIT_LIST_HEAD(&mdev->pads);
+ INIT_LIST_HEAD(&mdev->links);
+ INIT_LIST_HEAD(&mdev->entity_notify);
+
+ mutex_init(&mdev->req_queue_mutex);
+ mutex_init(&mdev->graph_mutex);
+ ida_init(&mdev->entity_internal_idx);
+
+ atomic_set(&mdev->request_id, 0);
+
+ if (!*mdev->bus_info)
+ media_set_bus_info(mdev->bus_info, sizeof(mdev->bus_info),
+ mdev->dev);
+
+ dev_dbg(mdev->dev, "Media device initialized\n");
+}
+EXPORT_SYMBOL_GPL(media_device_init);
+
+void media_device_cleanup(struct media_device *mdev)
+{
+ ida_destroy(&mdev->entity_internal_idx);
+ mdev->entity_internal_idx_max = 0;
+ media_graph_walk_cleanup(&mdev->pm_count_walk);
+ mutex_destroy(&mdev->graph_mutex);
+ mutex_destroy(&mdev->req_queue_mutex);
+}
+EXPORT_SYMBOL_GPL(media_device_cleanup);
+
+int __must_check __media_device_register(struct media_device *mdev,
+ struct module *owner)
+{
+ struct media_devnode *devnode;
+ int ret;
+
+ devnode = kzalloc(sizeof(*devnode), GFP_KERNEL);
+ if (!devnode)
+ return -ENOMEM;
+
+ /* Register the device node. */
+ mdev->devnode = devnode;
+ devnode->fops = &media_device_fops;
+ devnode->parent = mdev->dev;
+ devnode->release = media_device_release;
+
+	/* Set version 0 to indicate to userspace that the graph is static */
+ mdev->topology_version = 0;
+
+ ret = media_devnode_register(mdev, devnode, owner);
+ if (ret < 0) {
+ /* devnode free is handled in media_devnode_*() */
+ mdev->devnode = NULL;
+ return ret;
+ }
+
+ ret = device_create_file(&devnode->dev, &dev_attr_model);
+ if (ret < 0) {
+ /* devnode free is handled in media_devnode_*() */
+ mdev->devnode = NULL;
+ media_devnode_unregister_prepare(devnode);
+ media_devnode_unregister(devnode);
+ return ret;
+ }
+
+ dev_dbg(mdev->dev, "Media device registered\n");
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__media_device_register);
+
+void media_device_register_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr)
+{
+ mutex_lock(&mdev->graph_mutex);
+ list_add_tail(&nptr->list, &mdev->entity_notify);
+ mutex_unlock(&mdev->graph_mutex);
+}
+EXPORT_SYMBOL_GPL(media_device_register_entity_notify);
+
+/*
+ * Note: Should be called with mdev->graph_mutex held.
+ */
+static void __media_device_unregister_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr)
+{
+ list_del(&nptr->list);
+}
+
+void media_device_unregister_entity_notify(struct media_device *mdev,
+ struct media_entity_notify *nptr)
+{
+ mutex_lock(&mdev->graph_mutex);
+ __media_device_unregister_entity_notify(mdev, nptr);
+ mutex_unlock(&mdev->graph_mutex);
+}
+EXPORT_SYMBOL_GPL(media_device_unregister_entity_notify);
+
+void media_device_unregister(struct media_device *mdev)
+{
+ struct media_entity *entity;
+ struct media_entity *next;
+ struct media_interface *intf, *tmp_intf;
+ struct media_entity_notify *notify, *nextp;
+
+ if (mdev == NULL)
+ return;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /* Check if mdev was ever registered at all */
+ if (!media_devnode_is_registered(mdev->devnode)) {
+ mutex_unlock(&mdev->graph_mutex);
+ return;
+ }
+
+ /* Clear the devnode register bit to avoid races with media dev open */
+ media_devnode_unregister_prepare(mdev->devnode);
+
+ /* Remove all entities from the media device */
+ list_for_each_entry_safe(entity, next, &mdev->entities, graph_obj.list)
+ __media_device_unregister_entity(entity);
+
+ /* Remove all entity_notify callbacks from the media device */
+ list_for_each_entry_safe(notify, nextp, &mdev->entity_notify, list)
+ __media_device_unregister_entity_notify(mdev, notify);
+
+ /* Remove all interfaces from the media device */
+ list_for_each_entry_safe(intf, tmp_intf, &mdev->interfaces,
+ graph_obj.list) {
+ /*
+ * Unlink the interface, but don't free it here; the
+ * module which created it is responsible for freeing
+ * it
+ */
+ __media_remove_intf_links(intf);
+ media_gobj_destroy(&intf->graph_obj);
+ }
+
+ mutex_unlock(&mdev->graph_mutex);
+
+ dev_dbg(mdev->dev, "Media device unregistered\n");
+
+ device_remove_file(&mdev->devnode->dev, &dev_attr_model);
+ media_devnode_unregister(mdev->devnode);
+ /* devnode free is handled in media_devnode_*() */
+ mdev->devnode = NULL;
+}
+EXPORT_SYMBOL_GPL(media_device_unregister);
+
+#if IS_ENABLED(CONFIG_PCI)
+void media_device_pci_init(struct media_device *mdev,
+ struct pci_dev *pci_dev,
+ const char *name)
+{
+ mdev->dev = &pci_dev->dev;
+
+ if (name)
+ strscpy(mdev->model, name, sizeof(mdev->model));
+ else
+ strscpy(mdev->model, pci_name(pci_dev), sizeof(mdev->model));
+
+ sprintf(mdev->bus_info, "PCI:%s", pci_name(pci_dev));
+
+ mdev->hw_revision = (pci_dev->subsystem_vendor << 16)
+ | pci_dev->subsystem_device;
+
+ media_device_init(mdev);
+}
+EXPORT_SYMBOL_GPL(media_device_pci_init);
+#endif
+
+#if IS_ENABLED(CONFIG_USB)
+void __media_device_usb_init(struct media_device *mdev,
+ struct usb_device *udev,
+ const char *board_name,
+ const char *driver_name)
+{
+ mdev->dev = &udev->dev;
+
+ if (driver_name)
+ strscpy(mdev->driver_name, driver_name,
+ sizeof(mdev->driver_name));
+
+ if (board_name)
+ strscpy(mdev->model, board_name, sizeof(mdev->model));
+ else if (udev->product)
+ strscpy(mdev->model, udev->product, sizeof(mdev->model));
+ else
+ strscpy(mdev->model, "unknown model", sizeof(mdev->model));
+ if (udev->serial)
+ strscpy(mdev->serial, udev->serial, sizeof(mdev->serial));
+ usb_make_path(udev, mdev->bus_info, sizeof(mdev->bus_info));
+ mdev->hw_revision = le16_to_cpu(udev->descriptor.bcdDevice);
+
+ media_device_init(mdev);
+}
+EXPORT_SYMBOL_GPL(__media_device_usb_init);
+#endif
+
+
+#endif /* CONFIG_MEDIA_CONTROLLER */
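
For reference, the usual registration sequence a driver builds on top of the
helpers above, sketched for a PCI device (error paths trimmed, entity and pad
setup elided; sample_pci_setup() and the "sample-capture" name are made up):

	static int sample_pci_setup(struct pci_dev *pci_dev,
				    struct media_device *mdev,
				    struct media_entity *entity)
	{
		int ret;

		media_device_pci_init(mdev, pci_dev, "sample-capture");

		ret = media_device_register_entity(mdev, entity);
		if (ret)
			goto err_cleanup;

		/* media_device_register() wraps __media_device_register()
		 * with THIS_MODULE as the owner. */
		ret = media_device_register(mdev);
		if (ret)
			goto err_unregister_entity;

		return 0;

	err_unregister_entity:
		media_device_unregister_entity(entity);
	err_cleanup:
		media_device_cleanup(mdev);
		return ret;
	}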
diff --git a/drivers/media/mc/mc-devnode.c b/drivers/media/mc/mc-devnode.c
new file mode 100644
index 0000000000..680fbb3a93
--- /dev/null
+++ b/drivers/media/mc/mc-devnode.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Media device node
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Based on drivers/media/video/v4l2_dev.c code authored by
+ * Mauro Carvalho Chehab <mchehab@kernel.org> (version 2)
+ * Alan Cox, <alan@lxorguk.ukuu.org.uk> (version 1)
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ *
+ * --
+ *
+ * Generic media device node infrastructure to register and unregister
+ * character devices using a dynamic major number and proper reference
+ * counting.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+#include <media/media-devnode.h>
+#include <media/media-device.h>
+
+#define MEDIA_NUM_DEVICES 256
+#define MEDIA_NAME "media"
+
+static dev_t media_dev_t;
+
+/*
+ * Active devices
+ */
+static DEFINE_MUTEX(media_devnode_lock);
+static DECLARE_BITMAP(media_devnode_nums, MEDIA_NUM_DEVICES);
+
+/* Called when the last user of the media device exits. */
+static void media_devnode_release(struct device *cd)
+{
+ struct media_devnode *devnode = to_media_devnode(cd);
+
+ mutex_lock(&media_devnode_lock);
+ /* Mark device node number as free */
+ clear_bit(devnode->minor, media_devnode_nums);
+ mutex_unlock(&media_devnode_lock);
+
+ /* Release media_devnode and perform other cleanups as needed. */
+ if (devnode->release)
+ devnode->release(devnode);
+
+ kfree(devnode);
+ pr_debug("%s: Media Devnode Deallocated\n", __func__);
+}
+
+static struct bus_type media_bus_type = {
+ .name = MEDIA_NAME,
+};
+
+static ssize_t media_read(struct file *filp, char __user *buf,
+ size_t sz, loff_t *off)
+{
+ struct media_devnode *devnode = media_devnode_data(filp);
+
+ if (!devnode->fops->read)
+ return -EINVAL;
+ if (!media_devnode_is_registered(devnode))
+ return -EIO;
+ return devnode->fops->read(filp, buf, sz, off);
+}
+
+static ssize_t media_write(struct file *filp, const char __user *buf,
+ size_t sz, loff_t *off)
+{
+ struct media_devnode *devnode = media_devnode_data(filp);
+
+ if (!devnode->fops->write)
+ return -EINVAL;
+ if (!media_devnode_is_registered(devnode))
+ return -EIO;
+ return devnode->fops->write(filp, buf, sz, off);
+}
+
+static __poll_t media_poll(struct file *filp,
+ struct poll_table_struct *poll)
+{
+ struct media_devnode *devnode = media_devnode_data(filp);
+
+ if (!media_devnode_is_registered(devnode))
+ return EPOLLERR | EPOLLHUP;
+ if (!devnode->fops->poll)
+ return DEFAULT_POLLMASK;
+ return devnode->fops->poll(filp, poll);
+}
+
+static long
+__media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg,
+ long (*ioctl_func)(struct file *filp, unsigned int cmd,
+ unsigned long arg))
+{
+ struct media_devnode *devnode = media_devnode_data(filp);
+
+ if (!ioctl_func)
+ return -ENOTTY;
+
+ if (!media_devnode_is_registered(devnode))
+ return -EIO;
+
+ return ioctl_func(filp, cmd, arg);
+}
+
+static long media_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ struct media_devnode *devnode = media_devnode_data(filp);
+
+ return __media_ioctl(filp, cmd, arg, devnode->fops->ioctl);
+}
+
+#ifdef CONFIG_COMPAT
+
+static long media_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct media_devnode *devnode = media_devnode_data(filp);
+
+ return __media_ioctl(filp, cmd, arg, devnode->fops->compat_ioctl);
+}
+
+#endif /* CONFIG_COMPAT */
+
+/* Override for the open function */
+static int media_open(struct inode *inode, struct file *filp)
+{
+ struct media_devnode *devnode;
+ int ret;
+
+ /* Check if the media device is available. This needs to be done with
+ * the media_devnode_lock held to prevent an open/unregister race:
+ * without the lock, the device could be unregistered and freed between
+ * the media_devnode_is_registered() and get_device() calls, leading to
+ * a crash.
+ */
+ mutex_lock(&media_devnode_lock);
+ devnode = container_of(inode->i_cdev, struct media_devnode, cdev);
+	/*
+	 * Return -ENXIO if the media device has been removed already or
+	 * if it is not registered anymore.
+	 */
+ if (!media_devnode_is_registered(devnode)) {
+ mutex_unlock(&media_devnode_lock);
+ return -ENXIO;
+ }
+ /* and increase the device refcount */
+ get_device(&devnode->dev);
+ mutex_unlock(&media_devnode_lock);
+
+ filp->private_data = devnode;
+
+ if (devnode->fops->open) {
+ ret = devnode->fops->open(filp);
+ if (ret) {
+ put_device(&devnode->dev);
+ filp->private_data = NULL;
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/* Override for the release function */
+static int media_release(struct inode *inode, struct file *filp)
+{
+ struct media_devnode *devnode = media_devnode_data(filp);
+
+ if (devnode->fops->release)
+ devnode->fops->release(filp);
+
+ filp->private_data = NULL;
+
+	/*
+	 * Decrease the refcount unconditionally, since the release()
+	 * return value is ignored.
+	 */
+ put_device(&devnode->dev);
+
+ pr_debug("%s: Media Release\n", __func__);
+ return 0;
+}
+
+static const struct file_operations media_devnode_fops = {
+ .owner = THIS_MODULE,
+ .read = media_read,
+ .write = media_write,
+ .open = media_open,
+ .unlocked_ioctl = media_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = media_compat_ioctl,
+#endif /* CONFIG_COMPAT */
+ .release = media_release,
+ .poll = media_poll,
+ .llseek = no_llseek,
+};
+
+int __must_check media_devnode_register(struct media_device *mdev,
+ struct media_devnode *devnode,
+ struct module *owner)
+{
+ int minor;
+ int ret;
+
+ /* Part 1: Find a free minor number */
+ mutex_lock(&media_devnode_lock);
+ minor = find_first_zero_bit(media_devnode_nums, MEDIA_NUM_DEVICES);
+ if (minor == MEDIA_NUM_DEVICES) {
+ mutex_unlock(&media_devnode_lock);
+ pr_err("could not get a free minor\n");
+ kfree(devnode);
+ return -ENFILE;
+ }
+
+ set_bit(minor, media_devnode_nums);
+ mutex_unlock(&media_devnode_lock);
+
+ devnode->minor = minor;
+ devnode->media_dev = mdev;
+
+	/* Part 2: Initialize dev now to use dev.kobj for cdev.kobj.parent */
+ devnode->dev.bus = &media_bus_type;
+ devnode->dev.devt = MKDEV(MAJOR(media_dev_t), devnode->minor);
+ devnode->dev.release = media_devnode_release;
+ if (devnode->parent)
+ devnode->dev.parent = devnode->parent;
+ dev_set_name(&devnode->dev, "media%d", devnode->minor);
+ device_initialize(&devnode->dev);
+
+	/* Part 3: Initialize the character device */
+ cdev_init(&devnode->cdev, &media_devnode_fops);
+ devnode->cdev.owner = owner;
+ kobject_set_name(&devnode->cdev.kobj, "media%d", devnode->minor);
+
+	/* Part 4: Add the media and char device */
+ ret = cdev_device_add(&devnode->cdev, &devnode->dev);
+ if (ret < 0) {
+ pr_err("%s: cdev_device_add failed\n", __func__);
+ goto cdev_add_error;
+ }
+
+	/* Part 5: Activate this minor. The char device can now be used. */
+ set_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+
+ return 0;
+
+cdev_add_error:
+ mutex_lock(&media_devnode_lock);
+ clear_bit(devnode->minor, media_devnode_nums);
+ devnode->media_dev = NULL;
+ mutex_unlock(&media_devnode_lock);
+
+ put_device(&devnode->dev);
+ return ret;
+}
+
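+/*
+ * Unregistration happens in two steps: this function clears the
+ * REGISTERED flag so that new open(), ioctl() and other file operations
+ * fail, while media_devnode_unregister() removes the character device
+ * and drops the reference that keeps the devnode alive.
+ */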
+void media_devnode_unregister_prepare(struct media_devnode *devnode)
+{
+ /* Check if devnode was ever registered at all */
+ if (!media_devnode_is_registered(devnode))
+ return;
+
+ mutex_lock(&media_devnode_lock);
+ clear_bit(MEDIA_FLAG_REGISTERED, &devnode->flags);
+ mutex_unlock(&media_devnode_lock);
+}
+
+void media_devnode_unregister(struct media_devnode *devnode)
+{
+ mutex_lock(&media_devnode_lock);
+ /* Delete the cdev on this minor as well */
+ cdev_device_del(&devnode->cdev, &devnode->dev);
+ devnode->media_dev = NULL;
+ mutex_unlock(&media_devnode_lock);
+
+ put_device(&devnode->dev);
+}
+
+/*
+ * Initialise the media devnode infrastructure
+ */
+static int __init media_devnode_init(void)
+{
+ int ret;
+
+ pr_info("Linux media interface: v0.10\n");
+ ret = alloc_chrdev_region(&media_dev_t, 0, MEDIA_NUM_DEVICES,
+ MEDIA_NAME);
+ if (ret < 0) {
+ pr_warn("unable to allocate major\n");
+ return ret;
+ }
+
+ ret = bus_register(&media_bus_type);
+ if (ret < 0) {
+ unregister_chrdev_region(media_dev_t, MEDIA_NUM_DEVICES);
+ pr_warn("bus_register failed\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit media_devnode_exit(void)
+{
+ bus_unregister(&media_bus_type);
+ unregister_chrdev_region(media_dev_t, MEDIA_NUM_DEVICES);
+}
+
+subsys_initcall(media_devnode_init);
+module_exit(media_devnode_exit);
+
+MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@ideasonboard.com>");
+MODULE_DESCRIPTION("Device node registration for media drivers");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
new file mode 100644
index 0000000000..83468d4a44
--- /dev/null
+++ b/drivers/media/mc/mc-entity.c
@@ -0,0 +1,1611 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Media entity
+ *
+ * Copyright (C) 2010 Nokia Corporation
+ *
+ * Contacts: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ * Sakari Ailus <sakari.ailus@iki.fi>
+ */
+
+#include <linux/bitmap.h>
+#include <linux/list.h>
+#include <linux/property.h>
+#include <linux/slab.h>
+#include <media/media-entity.h>
+#include <media/media-device.h>
+
+static inline const char *intf_type(struct media_interface *intf)
+{
+ switch (intf->type) {
+ case MEDIA_INTF_T_DVB_FE:
+ return "dvb-frontend";
+ case MEDIA_INTF_T_DVB_DEMUX:
+ return "dvb-demux";
+ case MEDIA_INTF_T_DVB_DVR:
+ return "dvb-dvr";
+ case MEDIA_INTF_T_DVB_CA:
+ return "dvb-ca";
+ case MEDIA_INTF_T_DVB_NET:
+ return "dvb-net";
+ case MEDIA_INTF_T_V4L_VIDEO:
+ return "v4l-video";
+ case MEDIA_INTF_T_V4L_VBI:
+ return "v4l-vbi";
+ case MEDIA_INTF_T_V4L_RADIO:
+ return "v4l-radio";
+ case MEDIA_INTF_T_V4L_SUBDEV:
+ return "v4l-subdev";
+ case MEDIA_INTF_T_V4L_SWRADIO:
+ return "v4l-swradio";
+ case MEDIA_INTF_T_V4L_TOUCH:
+ return "v4l-touch";
+ default:
+ return "unknown-intf";
+ }
+}
+
+static inline const char *link_type_name(struct media_link *link)
+{
+ switch (link->flags & MEDIA_LNK_FL_LINK_TYPE) {
+ case MEDIA_LNK_FL_DATA_LINK:
+ return "data";
+ case MEDIA_LNK_FL_INTERFACE_LINK:
+ return "interface";
+ case MEDIA_LNK_FL_ANCILLARY_LINK:
+ return "ancillary";
+ default:
+ return "unknown";
+ }
+}
+
+__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum,
+ struct media_device *mdev)
+{
+ int idx_max;
+
+ idx_max = ALIGN(mdev->entity_internal_idx_max + 1, BITS_PER_LONG);
+ ent_enum->bmap = bitmap_zalloc(idx_max, GFP_KERNEL);
+ if (!ent_enum->bmap)
+ return -ENOMEM;
+
+ ent_enum->idx_max = idx_max;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(media_entity_enum_init);
+
+void media_entity_enum_cleanup(struct media_entity_enum *ent_enum)
+{
+ bitmap_free(ent_enum->bmap);
+}
+EXPORT_SYMBOL_GPL(media_entity_enum_cleanup);
+
+/**
+ * dev_dbg_obj - Prints in debug mode a change on some object
+ *
+ * @event_name: Name of the event to report. Could be __func__
+ * @gobj: Pointer to the object
+ *
+ * Enabled only if DEBUG or CONFIG_DYNAMIC_DEBUG. Otherwise, it
+ * won't produce any code.
+ */
+static void dev_dbg_obj(const char *event_name, struct media_gobj *gobj)
+{
+#if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG)
+ switch (media_type(gobj)) {
+ case MEDIA_GRAPH_ENTITY:
+ dev_dbg(gobj->mdev->dev,
+ "%s id %u: entity '%s'\n",
+ event_name, media_id(gobj),
+ gobj_to_entity(gobj)->name);
+ break;
+ case MEDIA_GRAPH_LINK:
+ {
+ struct media_link *link = gobj_to_link(gobj);
+
+ dev_dbg(gobj->mdev->dev,
+ "%s id %u: %s link id %u ==> id %u\n",
+ event_name, media_id(gobj), link_type_name(link),
+ media_id(link->gobj0),
+ media_id(link->gobj1));
+ break;
+ }
+ case MEDIA_GRAPH_PAD:
+ {
+ struct media_pad *pad = gobj_to_pad(gobj);
+
+ dev_dbg(gobj->mdev->dev,
+ "%s id %u: %s%spad '%s':%d\n",
+ event_name, media_id(gobj),
+ pad->flags & MEDIA_PAD_FL_SINK ? "sink " : "",
+ pad->flags & MEDIA_PAD_FL_SOURCE ? "source " : "",
+ pad->entity->name, pad->index);
+ break;
+ }
+ case MEDIA_GRAPH_INTF_DEVNODE:
+ {
+ struct media_interface *intf = gobj_to_intf(gobj);
+ struct media_intf_devnode *devnode = intf_to_devnode(intf);
+
+ dev_dbg(gobj->mdev->dev,
+ "%s id %u: intf_devnode %s - major: %d, minor: %d\n",
+ event_name, media_id(gobj),
+ intf_type(intf),
+ devnode->major, devnode->minor);
+ break;
+ }
+ }
+#endif
+}
+
+void media_gobj_create(struct media_device *mdev,
+ enum media_gobj_type type,
+ struct media_gobj *gobj)
+{
+ BUG_ON(!mdev);
+
+ gobj->mdev = mdev;
+
+ /* Create a per-type unique object ID */
+ gobj->id = media_gobj_gen_id(type, ++mdev->id);
+
+ switch (type) {
+ case MEDIA_GRAPH_ENTITY:
+ list_add_tail(&gobj->list, &mdev->entities);
+ break;
+ case MEDIA_GRAPH_PAD:
+ list_add_tail(&gobj->list, &mdev->pads);
+ break;
+ case MEDIA_GRAPH_LINK:
+ list_add_tail(&gobj->list, &mdev->links);
+ break;
+ case MEDIA_GRAPH_INTF_DEVNODE:
+ list_add_tail(&gobj->list, &mdev->interfaces);
+ break;
+ }
+
+ mdev->topology_version++;
+
+ dev_dbg_obj(__func__, gobj);
+}
+
+void media_gobj_destroy(struct media_gobj *gobj)
+{
+ /* Do nothing if the object is not linked. */
+ if (gobj->mdev == NULL)
+ return;
+
+ dev_dbg_obj(__func__, gobj);
+
+ gobj->mdev->topology_version++;
+
+ /* Remove the object from mdev list */
+ list_del(&gobj->list);
+
+ gobj->mdev = NULL;
+}
+
+/*
+ * TODO: Get rid of this.
+ */
+#define MEDIA_ENTITY_MAX_PADS 512
+
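+/*
+ * For a simple processing entity with one sink and one source pad, a
+ * driver would typically do:
+ *
+ *	entity->function = MEDIA_ENT_F_PROC_VIDEO_SCALER;
+ *	pads[0].flags = MEDIA_PAD_FL_SINK;
+ *	pads[1].flags = MEDIA_PAD_FL_SOURCE;
+ *	ret = media_entity_pads_init(entity, 2, pads);
+ */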
+int media_entity_pads_init(struct media_entity *entity, u16 num_pads,
+ struct media_pad *pads)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+ struct media_pad *iter;
+ unsigned int i = 0;
+
+ if (num_pads >= MEDIA_ENTITY_MAX_PADS)
+ return -E2BIG;
+
+ entity->num_pads = num_pads;
+ entity->pads = pads;
+
+ if (mdev)
+ mutex_lock(&mdev->graph_mutex);
+
+ media_entity_for_each_pad(entity, iter) {
+ iter->entity = entity;
+ iter->index = i++;
+ if (mdev)
+ media_gobj_create(mdev, MEDIA_GRAPH_PAD,
+ &iter->graph_obj);
+ }
+
+ if (mdev)
+ mutex_unlock(&mdev->graph_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(media_entity_pads_init);
+
+/* -----------------------------------------------------------------------------
+ * Graph traversal
+ */
+
+/**
+ * media_entity_has_pad_interdep - Check interdependency between two pads
+ *
+ * @entity: The entity
+ * @pad0: The first pad index
+ * @pad1: The second pad index
+ *
+ * This function checks the interdependency inside the entity between @pad0
+ * and @pad1. If two pads are interdependent they are part of the same pipeline
+ * and enabling one of the pads means that the other pad will become "locked"
+ * and doesn't allow configuration changes.
+ *
+ * This function uses the &media_entity_operations.has_pad_interdep() operation
+ * to check the dependency inside the entity between @pad0 and @pad1. If the
+ * has_pad_interdep operation is not implemented, all pads of the entity are
+ * considered to be interdependent.
+ *
+ * One of @pad0 and @pad1 must be a sink pad and the other one a source pad.
+ * The function returns false if both pads are sinks or sources.
+ *
+ * The caller must hold entity->graph_obj.mdev->mutex.
+ *
+ * Return: true if the pads are connected internally and false otherwise.
+ */
+static bool media_entity_has_pad_interdep(struct media_entity *entity,
+ unsigned int pad0, unsigned int pad1)
+{
+ if (pad0 >= entity->num_pads || pad1 >= entity->num_pads)
+ return false;
+
+ if (entity->pads[pad0].flags & entity->pads[pad1].flags &
+ (MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE))
+ return false;
+
+ if (!entity->ops || !entity->ops->has_pad_interdep)
+ return true;
+
+ return entity->ops->has_pad_interdep(entity, pad0, pad1);
+}
+
+static struct media_entity *
+media_entity_other(struct media_entity *entity, struct media_link *link)
+{
+ if (link->source->entity == entity)
+ return link->sink->entity;
+ else
+ return link->source->entity;
+}
+
+/* push an entity to traversal stack */
+static void stack_push(struct media_graph *graph,
+ struct media_entity *entity)
+{
+ if (graph->top == MEDIA_ENTITY_ENUM_MAX_DEPTH - 1) {
+ WARN_ON(1);
+ return;
+ }
+ graph->top++;
+ graph->stack[graph->top].link = entity->links.next;
+ graph->stack[graph->top].entity = entity;
+}
+
+static struct media_entity *stack_pop(struct media_graph *graph)
+{
+ struct media_entity *entity;
+
+ entity = graph->stack[graph->top].entity;
+ graph->top--;
+
+ return entity;
+}
+
+#define link_top(en) ((en)->stack[(en)->top].link)
+#define stack_top(en) ((en)->stack[(en)->top].entity)
+
+/**
+ * media_graph_walk_init - Allocate resources for graph walk
+ * @graph: Media graph structure that will be used to walk the graph
+ * @mdev: Media device
+ *
+ * Reserve resources for graph walk in media device's current
+ * state. The memory must be released using
+ * media_graph_walk_cleanup().
+ *
+ * Returns error on failure, zero on success.
+ */
+__must_check int media_graph_walk_init(
+ struct media_graph *graph, struct media_device *mdev)
+{
+ return media_entity_enum_init(&graph->ent_enum, mdev);
+}
+EXPORT_SYMBOL_GPL(media_graph_walk_init);
+
+/**
+ * media_graph_walk_cleanup - Release resources related to graph walking
+ * @graph: Media graph structure that was used to walk the graph
+ */
+void media_graph_walk_cleanup(struct media_graph *graph)
+{
+ media_entity_enum_cleanup(&graph->ent_enum);
+}
+EXPORT_SYMBOL_GPL(media_graph_walk_cleanup);
+
+void media_graph_walk_start(struct media_graph *graph,
+ struct media_entity *entity)
+{
+ media_entity_enum_zero(&graph->ent_enum);
+ media_entity_enum_set(&graph->ent_enum, entity);
+
+ graph->top = 0;
+ graph->stack[graph->top].entity = NULL;
+ stack_push(graph, entity);
+ dev_dbg(entity->graph_obj.mdev->dev,
+ "begin graph walk at '%s'\n", entity->name);
+}
+EXPORT_SYMBOL_GPL(media_graph_walk_start);
+
+static void media_graph_walk_iter(struct media_graph *graph)
+{
+ struct media_entity *entity = stack_top(graph);
+ struct media_link *link;
+ struct media_entity *next;
+
+ link = list_entry(link_top(graph), typeof(*link), list);
+
+ /* If the link is not a data link, don't follow it */
+ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) != MEDIA_LNK_FL_DATA_LINK) {
+ link_top(graph) = link_top(graph)->next;
+ return;
+ }
+
+ /* The link is not enabled so we do not follow. */
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ link_top(graph) = link_top(graph)->next;
+ dev_dbg(entity->graph_obj.mdev->dev,
+ "walk: skipping disabled link '%s':%u -> '%s':%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+ return;
+ }
+
+ /* Get the entity at the other end of the link. */
+ next = media_entity_other(entity, link);
+
+ /* Has the entity already been visited? */
+ if (media_entity_enum_test_and_set(&graph->ent_enum, next)) {
+ link_top(graph) = link_top(graph)->next;
+ dev_dbg(entity->graph_obj.mdev->dev,
+ "walk: skipping entity '%s' (already seen)\n",
+ next->name);
+ return;
+ }
+
+ /* Push the new entity to stack and start over. */
+ link_top(graph) = link_top(graph)->next;
+ stack_push(graph, next);
+ dev_dbg(entity->graph_obj.mdev->dev, "walk: pushing '%s' on stack\n",
+ next->name);
+ lockdep_assert_held(&entity->graph_obj.mdev->graph_mutex);
+}
+
+struct media_entity *media_graph_walk_next(struct media_graph *graph)
+{
+ struct media_entity *entity;
+
+ if (stack_top(graph) == NULL)
+ return NULL;
+
+ /*
+ * Depth first search. Push entity to stack and continue from
+ * top of the stack until no more entities on the level can be
+ * found.
+ */
+ while (link_top(graph) != &stack_top(graph)->links)
+ media_graph_walk_iter(graph);
+
+ entity = stack_pop(graph);
+ dev_dbg(entity->graph_obj.mdev->dev,
+ "walk: returning entity '%s'\n", entity->name);
+
+ return entity;
+}
+EXPORT_SYMBOL_GPL(media_graph_walk_next);
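+
+/*
+ * A typical traversal, run with mdev->graph_mutex held, visits every
+ * entity reachable from @entity over enabled data links exactly once:
+ *
+ *	media_graph_walk_start(&graph, entity);
+ *	while ((entity = media_graph_walk_next(&graph)))
+ *		...;
+ */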
+
+/* -----------------------------------------------------------------------------
+ * Pipeline management
+ */
+
+/*
+ * The pipeline traversal stack stores pads that are reached during graph
+ * traversal, with a list of links to be visited to continue the traversal.
+ * When a new pad is reached, an entry is pushed on the top of the stack and
+ * points to the incoming pad and the first link of the entity.
+ *
+ * To find further pads in the pipeline, the traversal algorithm follows
+ * internal pad dependencies in the entity, and then links in the graph. It
+ * does so by iterating over all links of the entity, and following enabled
+ * links that originate from a pad that is internally connected to the incoming
+ * pad, as reported by the media_entity_has_pad_interdep() function.
+ */
+
+/**
+ * struct media_pipeline_walk_entry - Entry in the pipeline traversal stack
+ *
+ * @pad: The media pad being visited
+ * @links: Links left to be visited
+ */
+struct media_pipeline_walk_entry {
+ struct media_pad *pad;
+ struct list_head *links;
+};
+
+/**
+ * struct media_pipeline_walk - State used by the media pipeline traversal
+ * algorithm
+ *
+ * @mdev: The media device
+ * @stack: Depth-first search stack
+ * @stack.size: Number of allocated entries in @stack.entries
+ * @stack.top: Index of the top stack entry (-1 if the stack is empty)
+ * @stack.entries: Stack entries
+ */
+struct media_pipeline_walk {
+ struct media_device *mdev;
+
+ struct {
+ unsigned int size;
+ int top;
+ struct media_pipeline_walk_entry *entries;
+ } stack;
+};
+
+#define MEDIA_PIPELINE_STACK_GROW_STEP 16
+
+static struct media_pipeline_walk_entry *
+media_pipeline_walk_top(struct media_pipeline_walk *walk)
+{
+ return &walk->stack.entries[walk->stack.top];
+}
+
+static bool media_pipeline_walk_empty(struct media_pipeline_walk *walk)
+{
+ return walk->stack.top == -1;
+}
+
+/* Increase the stack size by MEDIA_PIPELINE_STACK_GROW_STEP elements. */
+static int media_pipeline_walk_resize(struct media_pipeline_walk *walk)
+{
+ struct media_pipeline_walk_entry *entries;
+ unsigned int new_size;
+
+ /* Safety check, to avoid stack overflows in case of bugs. */
+ if (walk->stack.size >= 256)
+ return -E2BIG;
+
+ new_size = walk->stack.size + MEDIA_PIPELINE_STACK_GROW_STEP;
+
+ entries = krealloc(walk->stack.entries,
+ new_size * sizeof(*walk->stack.entries),
+ GFP_KERNEL);
+ if (!entries)
+ return -ENOMEM;
+
+ walk->stack.entries = entries;
+ walk->stack.size = new_size;
+
+ return 0;
+}
+
+/* Push a new entry on the stack. */
+static int media_pipeline_walk_push(struct media_pipeline_walk *walk,
+ struct media_pad *pad)
+{
+ struct media_pipeline_walk_entry *entry;
+ int ret;
+
+ if (walk->stack.top + 1 >= walk->stack.size) {
+ ret = media_pipeline_walk_resize(walk);
+ if (ret)
+ return ret;
+ }
+
+ walk->stack.top++;
+ entry = media_pipeline_walk_top(walk);
+ entry->pad = pad;
+ entry->links = pad->entity->links.next;
+
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: pushed entry %u: '%s':%u\n",
+ walk->stack.top, pad->entity->name, pad->index);
+
+ return 0;
+}
+
+/*
+ * Move the top entry link cursor to the next link. If all links of the entry
+ * have been visited, pop the entry itself.
+ */
+static void media_pipeline_walk_pop(struct media_pipeline_walk *walk)
+{
+ struct media_pipeline_walk_entry *entry;
+
+ if (WARN_ON(walk->stack.top < 0))
+ return;
+
+ entry = media_pipeline_walk_top(walk);
+
+ if (entry->links->next == &entry->pad->entity->links) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: entry %u has no more links, popping\n",
+ walk->stack.top);
+
+ walk->stack.top--;
+ return;
+ }
+
+ entry->links = entry->links->next;
+
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: moved entry %u to next link\n",
+ walk->stack.top);
+}
+
+/* Free all memory allocated while walking the pipeline. */
+static void media_pipeline_walk_destroy(struct media_pipeline_walk *walk)
+{
+ kfree(walk->stack.entries);
+}
+
+/* Add a pad to the pipeline and push it to the stack. */
+static int media_pipeline_add_pad(struct media_pipeline *pipe,
+ struct media_pipeline_walk *walk,
+ struct media_pad *pad)
+{
+ struct media_pipeline_pad *ppad;
+
+ list_for_each_entry(ppad, &pipe->pads, list) {
+ if (ppad->pad == pad) {
+ dev_dbg(pad->graph_obj.mdev->dev,
+ "media pipeline: already contains pad '%s':%u\n",
+ pad->entity->name, pad->index);
+ return 0;
+ }
+ }
+
+ ppad = kzalloc(sizeof(*ppad), GFP_KERNEL);
+ if (!ppad)
+ return -ENOMEM;
+
+ ppad->pipe = pipe;
+ ppad->pad = pad;
+
+ list_add_tail(&ppad->list, &pipe->pads);
+
+ dev_dbg(pad->graph_obj.mdev->dev,
+ "media pipeline: added pad '%s':%u\n",
+ pad->entity->name, pad->index);
+
+ return media_pipeline_walk_push(walk, pad);
+}
+
+/* Explore the next link of the entity at the top of the stack. */
+static int media_pipeline_explore_next_link(struct media_pipeline *pipe,
+ struct media_pipeline_walk *walk)
+{
+ struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk);
+ struct media_pad *pad;
+ struct media_link *link;
+ struct media_pad *local;
+ struct media_pad *remote;
+ int ret;
+
+ pad = entry->pad;
+ link = list_entry(entry->links, typeof(*link), list);
+ media_pipeline_walk_pop(walk);
+
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: exploring link '%s':%u -> '%s':%u\n",
+ link->source->entity->name, link->source->index,
+ link->sink->entity->name, link->sink->index);
+
+ /* Skip links that are not enabled. */
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED)) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (disabled)\n");
+ return 0;
+ }
+
+ /* Get the local pad and remote pad. */
+ if (link->source->entity == pad->entity) {
+ local = link->source;
+ remote = link->sink;
+ } else {
+ local = link->sink;
+ remote = link->source;
+ }
+
+	/*
+	 * Skip links whose local pad is neither the incoming pad nor
+	 * internally connected to the incoming pad within the entity.
+	 */
+ if (pad != local &&
+ !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) {
+ dev_dbg(walk->mdev->dev,
+ "media pipeline: skipping link (no route)\n");
+ return 0;
+ }
+
+ /*
+ * Add the local and remote pads of the link to the pipeline and push
+ * them to the stack, if they're not already present.
+ */
+ ret = media_pipeline_add_pad(pipe, walk, local);
+ if (ret)
+ return ret;
+
+ ret = media_pipeline_add_pad(pipe, walk, remote);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void media_pipeline_cleanup(struct media_pipeline *pipe)
+{
+ while (!list_empty(&pipe->pads)) {
+ struct media_pipeline_pad *ppad;
+
+ ppad = list_first_entry(&pipe->pads, typeof(*ppad), list);
+ list_del(&ppad->list);
+ kfree(ppad);
+ }
+}
+
+static int media_pipeline_populate(struct media_pipeline *pipe,
+ struct media_pad *pad)
+{
+ struct media_pipeline_walk walk = { };
+ struct media_pipeline_pad *ppad;
+ int ret;
+
+ /*
+ * Populate the media pipeline by walking the media graph, starting
+ * from @pad.
+ */
+ INIT_LIST_HEAD(&pipe->pads);
+ pipe->mdev = pad->graph_obj.mdev;
+
+ walk.mdev = pipe->mdev;
+ walk.stack.top = -1;
+ ret = media_pipeline_add_pad(pipe, &walk, pad);
+ if (ret)
+ goto done;
+
+ /*
+ * Use a depth-first search algorithm: as long as the stack is not
+ * empty, explore the next link of the top entry. The
+ * media_pipeline_explore_next_link() function will either move to the
+ * next link, pop the entry if fully visited, or add new entries on
+ * top.
+ */
+ while (!media_pipeline_walk_empty(&walk)) {
+ ret = media_pipeline_explore_next_link(pipe, &walk);
+ if (ret)
+ goto done;
+ }
+
+ dev_dbg(pad->graph_obj.mdev->dev,
+ "media pipeline populated, found pads:\n");
+
+ list_for_each_entry(ppad, &pipe->pads, list)
+ dev_dbg(pad->graph_obj.mdev->dev, "- '%s':%u\n",
+ ppad->pad->entity->name, ppad->pad->index);
+
+ WARN_ON(walk.stack.top != -1);
+
+ ret = 0;
+
+done:
+ media_pipeline_walk_destroy(&walk);
+
+ if (ret)
+ media_pipeline_cleanup(pipe);
+
+ return ret;
+}
+
+__must_check int __media_pipeline_start(struct media_pad *pad,
+ struct media_pipeline *pipe)
+{
+ struct media_device *mdev = pad->graph_obj.mdev;
+ struct media_pipeline_pad *err_ppad;
+ struct media_pipeline_pad *ppad;
+ int ret;
+
+ lockdep_assert_held(&mdev->graph_mutex);
+
+ /*
+ * If the pad is already part of a pipeline, that pipeline must be the
+ * same as the pipe given to media_pipeline_start().
+ */
+ if (WARN_ON(pad->pipe && pad->pipe != pipe))
+ return -EINVAL;
+
+ /*
+ * If the pipeline has already been started, it is guaranteed to be
+ * valid, so just increase the start count.
+ */
+ if (pipe->start_count) {
+ pipe->start_count++;
+ return 0;
+ }
+
+	/*
+	 * Populate the pipeline. This fills the media_pipeline pads list
+	 * with a media_pipeline_pad instance for each pad found during the
+	 * graph walk.
+	 */
+ ret = media_pipeline_populate(pipe, pad);
+ if (ret)
+ return ret;
+
+ /*
+ * Now that all the pads in the pipeline have been gathered, perform
+ * the validation steps.
+ */
+
+ list_for_each_entry(ppad, &pipe->pads, list) {
+ struct media_pad *pad = ppad->pad;
+ struct media_entity *entity = pad->entity;
+ bool has_enabled_link = false;
+ bool has_link = false;
+ struct media_link *link;
+
+ dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name,
+ pad->index);
+
+ /*
+ * 1. Ensure that the pad doesn't already belong to a different
+ * pipeline.
+ */
+ if (pad->pipe) {
+ dev_dbg(mdev->dev, "Failed to start pipeline: pad '%s':%u busy\n",
+ pad->entity->name, pad->index);
+ ret = -EBUSY;
+ goto error;
+ }
+
+ /*
+ * 2. Validate all active links whose sink is the current pad.
+ * Validation of the source pads is performed in the context of
+ * the connected sink pad to avoid duplicating checks.
+ */
+ for_each_media_entity_data_link(entity, link) {
+ /* Skip links unrelated to the current pad. */
+ if (link->sink != pad && link->source != pad)
+ continue;
+
+ /* Record if the pad has links and enabled links. */
+ if (link->flags & MEDIA_LNK_FL_ENABLED)
+ has_enabled_link = true;
+ has_link = true;
+
+ /*
+ * Validate the link if it's enabled and has the
+ * current pad as its sink.
+ */
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+ if (link->sink != pad)
+ continue;
+
+ if (!entity->ops || !entity->ops->link_validate)
+ continue;
+
+ ret = entity->ops->link_validate(link);
+ if (ret) {
+ dev_dbg(mdev->dev,
+ "Link '%s':%u -> '%s':%u failed validation: %d\n",
+ link->source->entity->name,
+ link->source->index,
+ link->sink->entity->name,
+ link->sink->index, ret);
+ goto error;
+ }
+
+ dev_dbg(mdev->dev,
+ "Link '%s':%u -> '%s':%u is valid\n",
+ link->source->entity->name,
+ link->source->index,
+ link->sink->entity->name,
+ link->sink->index);
+ }
+
+ /*
+ * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set,
+ * ensure that it has either no link or an enabled link.
+ */
+ if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link &&
+ !has_enabled_link) {
+ dev_dbg(mdev->dev,
+ "Pad '%s':%u must be connected by an enabled link\n",
+ pad->entity->name, pad->index);
+ ret = -ENOLINK;
+ goto error;
+ }
+
+ /* Validation passed, store the pipe pointer in the pad. */
+ pad->pipe = pipe;
+ }
+
+ pipe->start_count++;
+
+ return 0;
+
+error:
+	/*
+	 * Link validation on the graph failed. Revert the changes made so
+	 * far and return the error.
+	 */
+
+ list_for_each_entry(err_ppad, &pipe->pads, list) {
+ if (err_ppad == ppad)
+ break;
+
+ err_ppad->pad->pipe = NULL;
+ }
+
+ media_pipeline_cleanup(pipe);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__media_pipeline_start);
+
+__must_check int media_pipeline_start(struct media_pad *pad,
+ struct media_pipeline *pipe)
+{
+ struct media_device *mdev = pad->graph_obj.mdev;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+ ret = __media_pipeline_start(pad, pipe);
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(media_pipeline_start);
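+
+/*
+ * Example: a capture driver typically brackets streaming with
+ * media_pipeline_start() and media_pipeline_stop(). An illustrative
+ * sketch only; the xxx_* names, 'vdev' and 'pipe' are placeholders,
+ * not part of this file:
+ *
+ *    static int xxx_start_streaming(struct xxx_dev *dev)
+ *    {
+ *        struct media_pad *pad = &dev->vdev.entity.pads[0];
+ *        int ret;
+ *
+ *        ret = media_pipeline_start(pad, &dev->pipe);
+ *        if (ret)
+ *            return ret;
+ *
+ *        ... start the hardware and queue buffers here ...
+ *        return 0;
+ *    }
+ *
+ * The matching media_pipeline_stop(pad) at stream-off drops the start
+ * count and tears the pipeline down on the last stop.
+ */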
+
+void __media_pipeline_stop(struct media_pad *pad)
+{
+ struct media_pipeline *pipe = pad->pipe;
+ struct media_pipeline_pad *ppad;
+
+	/*
+	 * If the following check fails, the driver has performed an
+	 * unbalanced call to media_pipeline_stop().
+	 */
+ if (WARN_ON(!pipe))
+ return;
+
+ if (--pipe->start_count)
+ return;
+
+ list_for_each_entry(ppad, &pipe->pads, list)
+ ppad->pad->pipe = NULL;
+
+ media_pipeline_cleanup(pipe);
+
+ if (pipe->allocated)
+ kfree(pipe);
+}
+EXPORT_SYMBOL_GPL(__media_pipeline_stop);
+
+void media_pipeline_stop(struct media_pad *pad)
+{
+ struct media_device *mdev = pad->graph_obj.mdev;
+
+ mutex_lock(&mdev->graph_mutex);
+ __media_pipeline_stop(pad);
+ mutex_unlock(&mdev->graph_mutex);
+}
+EXPORT_SYMBOL_GPL(media_pipeline_stop);
+
+__must_check int media_pipeline_alloc_start(struct media_pad *pad)
+{
+ struct media_device *mdev = pad->graph_obj.mdev;
+ struct media_pipeline *new_pipe = NULL;
+ struct media_pipeline *pipe;
+ int ret;
+
+ mutex_lock(&mdev->graph_mutex);
+
+ /*
+ * Is the pad already part of a pipeline? If not, we need to allocate
+ * a pipe.
+ */
+ pipe = media_pad_pipeline(pad);
+ if (!pipe) {
+ new_pipe = kzalloc(sizeof(*new_pipe), GFP_KERNEL);
+ if (!new_pipe) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ pipe = new_pipe;
+ pipe->allocated = true;
+ }
+
+ ret = __media_pipeline_start(pad, pipe);
+ if (ret)
+ kfree(new_pipe);
+
+out:
+ mutex_unlock(&mdev->graph_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(media_pipeline_alloc_start);
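+
+/*
+ * Example: media_pipeline_alloc_start() saves callers from embedding a
+ * media_pipeline in their own state. A sketch with error paths trimmed
+ * and 'pad' assumed to be the driver's streaming pad:
+ *
+ *    ret = media_pipeline_alloc_start(pad);
+ *    if (ret)
+ *        return ret;
+ *    ...
+ *    media_pipeline_stop(pad);
+ *
+ * Because the pipeline was allocated here with pipe->allocated set, the
+ * final media_pipeline_stop() call frees it automatically.
+ */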
+
+struct media_pad *
+__media_pipeline_pad_iter_next(struct media_pipeline *pipe,
+ struct media_pipeline_pad_iter *iter,
+ struct media_pad *pad)
+{
+ if (!pad)
+ iter->cursor = pipe->pads.next;
+
+ if (iter->cursor == &pipe->pads)
+ return NULL;
+
+ pad = list_entry(iter->cursor, struct media_pipeline_pad, list)->pad;
+ iter->cursor = iter->cursor->next;
+
+ return pad;
+}
+EXPORT_SYMBOL_GPL(__media_pipeline_pad_iter_next);
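+
+/*
+ * Example: drivers normally use the media_pipeline_for_each_pad() helper
+ * from media-entity.h instead of calling the iterator directly. Sketch,
+ * assuming 'pipe' came from media_pad_pipeline() and 'dev' is the
+ * driver's device:
+ *
+ *    struct media_pipeline_pad_iter iter;
+ *    struct media_pad *pad;
+ *
+ *    media_pipeline_for_each_pad(pipe, &iter, pad)
+ *        dev_dbg(dev, "pad '%s':%u\n", pad->entity->name, pad->index);
+ */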
+
+int media_pipeline_entity_iter_init(struct media_pipeline *pipe,
+ struct media_pipeline_entity_iter *iter)
+{
+ return media_entity_enum_init(&iter->ent_enum, pipe->mdev);
+}
+EXPORT_SYMBOL_GPL(media_pipeline_entity_iter_init);
+
+void media_pipeline_entity_iter_cleanup(struct media_pipeline_entity_iter *iter)
+{
+ media_entity_enum_cleanup(&iter->ent_enum);
+}
+EXPORT_SYMBOL_GPL(media_pipeline_entity_iter_cleanup);
+
+struct media_entity *
+__media_pipeline_entity_iter_next(struct media_pipeline *pipe,
+ struct media_pipeline_entity_iter *iter,
+ struct media_entity *entity)
+{
+ if (!entity)
+ iter->cursor = pipe->pads.next;
+
+	while (iter->cursor != &pipe->pads) {
+		struct media_pipeline_pad *ppad;
+		struct media_entity *ent;
+
+		ppad = list_entry(iter->cursor, struct media_pipeline_pad, list);
+		ent = ppad->pad->entity;
+		iter->cursor = iter->cursor->next;
+
+		if (!media_entity_enum_test_and_set(&iter->ent_enum, ent))
+			return ent;
+	}
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__media_pipeline_entity_iter_next);
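+
+/*
+ * Example: unlike the pad iterator, the entity iterator needs explicit
+ * init/cleanup because it allocates an entity enumeration. Sketch using
+ * the media_pipeline_for_each_entity() helper from media-entity.h:
+ *
+ *    struct media_pipeline_entity_iter iter;
+ *    struct media_entity *entity;
+ *    int ret;
+ *
+ *    ret = media_pipeline_entity_iter_init(pipe, &iter);
+ *    if (ret)
+ *        return ret;
+ *
+ *    media_pipeline_for_each_entity(pipe, &iter, entity)
+ *        dev_dbg(dev, "entity '%s'\n", entity->name);
+ *
+ *    media_pipeline_entity_iter_cleanup(&iter);
+ */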
+
+/* -----------------------------------------------------------------------------
+ * Link management
+ */
+
+static struct media_link *media_add_link(struct list_head *head)
+{
+ struct media_link *link;
+
+ link = kzalloc(sizeof(*link), GFP_KERNEL);
+ if (link == NULL)
+ return NULL;
+
+ list_add_tail(&link->list, head);
+
+ return link;
+}
+
+static void __media_entity_remove_link(struct media_entity *entity,
+ struct media_link *link)
+{
+ struct media_link *rlink, *tmp;
+ struct media_entity *remote;
+
+ /* Remove the reverse links for a data link. */
+ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == MEDIA_LNK_FL_DATA_LINK) {
+ if (link->source->entity == entity)
+ remote = link->sink->entity;
+ else
+ remote = link->source->entity;
+
+ list_for_each_entry_safe(rlink, tmp, &remote->links, list) {
+ if (rlink != link->reverse)
+ continue;
+
+ if (link->source->entity == entity)
+ remote->num_backlinks--;
+
+ /* Remove the remote link */
+ list_del(&rlink->list);
+ media_gobj_destroy(&rlink->graph_obj);
+ kfree(rlink);
+
+ if (--remote->num_links == 0)
+ break;
+ }
+ }
+
+ list_del(&link->list);
+ media_gobj_destroy(&link->graph_obj);
+ kfree(link);
+}
+
+int media_get_pad_index(struct media_entity *entity, u32 pad_type,
+ enum media_pad_signal_type sig_type)
+{
+ unsigned int i;
+
+ if (!entity)
+ return -EINVAL;
+
+ for (i = 0; i < entity->num_pads; i++) {
+ if ((entity->pads[i].flags &
+ (MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE)) != pad_type)
+ continue;
+
+ if (entity->pads[i].sig_type == sig_type)
+ return i;
+ }
+ return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(media_get_pad_index);
+
+int
+media_create_pad_link(struct media_entity *source, u16 source_pad,
+ struct media_entity *sink, u16 sink_pad, u32 flags)
+{
+ struct media_link *link;
+ struct media_link *backlink;
+
+ if (WARN_ON(!source || !sink) ||
+ WARN_ON(source_pad >= source->num_pads) ||
+ WARN_ON(sink_pad >= sink->num_pads))
+ return -EINVAL;
+ if (WARN_ON(!(source->pads[source_pad].flags & MEDIA_PAD_FL_SOURCE)))
+ return -EINVAL;
+ if (WARN_ON(!(sink->pads[sink_pad].flags & MEDIA_PAD_FL_SINK)))
+ return -EINVAL;
+
+ link = media_add_link(&source->links);
+ if (link == NULL)
+ return -ENOMEM;
+
+ link->source = &source->pads[source_pad];
+ link->sink = &sink->pads[sink_pad];
+ link->flags = flags & ~MEDIA_LNK_FL_INTERFACE_LINK;
+
+ /* Initialize graph object embedded at the new link */
+ media_gobj_create(source->graph_obj.mdev, MEDIA_GRAPH_LINK,
+ &link->graph_obj);
+
+	/*
+	 * Create the backlink. Backlinks are used to help graph traversal
+	 * and are not reported to userspace.
+	 */
+ backlink = media_add_link(&sink->links);
+ if (backlink == NULL) {
+ __media_entity_remove_link(source, link);
+ return -ENOMEM;
+ }
+
+ backlink->source = &source->pads[source_pad];
+ backlink->sink = &sink->pads[sink_pad];
+ backlink->flags = flags;
+ backlink->is_backlink = true;
+
+ /* Initialize graph object embedded at the new link */
+ media_gobj_create(sink->graph_obj.mdev, MEDIA_GRAPH_LINK,
+ &backlink->graph_obj);
+
+ link->reverse = backlink;
+ backlink->reverse = link;
+
+ sink->num_backlinks++;
+ sink->num_links++;
+ source->num_links++;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(media_create_pad_link);
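+
+/*
+ * Example: a bridge driver usually wires a sensor subdev to its video
+ * node at probe time. Illustrative sketch; 'sensor_sd' and 'vdev' are
+ * assumed caller-side objects whose pad 0 has the right direction:
+ *
+ *    ret = media_create_pad_link(&sensor_sd->entity, 0,
+ *                                &vdev->entity, 0,
+ *                                MEDIA_LNK_FL_ENABLED |
+ *                                MEDIA_LNK_FL_IMMUTABLE);
+ */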
+
+int media_create_pad_links(const struct media_device *mdev,
+ const u32 source_function,
+ struct media_entity *source,
+ const u16 source_pad,
+ const u32 sink_function,
+ struct media_entity *sink,
+ const u16 sink_pad,
+ u32 flags,
+ const bool allow_both_undefined)
+{
+ struct media_entity *entity;
+	unsigned int function;
+ int ret;
+
+ /* Trivial case: 1:1 relation */
+ if (source && sink)
+ return media_create_pad_link(source, source_pad,
+ sink, sink_pad, flags);
+
+	/* Worst-case scenario: n:n relation */
+ if (!source && !sink) {
+ if (!allow_both_undefined)
+ return 0;
+ media_device_for_each_entity(source, mdev) {
+ if (source->function != source_function)
+ continue;
+ media_device_for_each_entity(sink, mdev) {
+ if (sink->function != sink_function)
+ continue;
+ ret = media_create_pad_link(source, source_pad,
+ sink, sink_pad,
+ flags);
+ if (ret)
+ return ret;
+ flags &= ~(MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_IMMUTABLE);
+ }
+ }
+ return 0;
+ }
+
+ /* Handle 1:n and n:1 cases */
+ if (source)
+ function = sink_function;
+ else
+ function = source_function;
+
+ media_device_for_each_entity(entity, mdev) {
+ if (entity->function != function)
+ continue;
+
+ if (source)
+ ret = media_create_pad_link(source, source_pad,
+ entity, sink_pad, flags);
+ else
+ ret = media_create_pad_link(entity, source_pad,
+ sink, sink_pad, flags);
+ if (ret)
+ return ret;
+ flags &= ~(MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(media_create_pad_links);
+
+void __media_entity_remove_links(struct media_entity *entity)
+{
+ struct media_link *link, *tmp;
+
+ list_for_each_entry_safe(link, tmp, &entity->links, list)
+ __media_entity_remove_link(entity, link);
+
+ entity->num_links = 0;
+ entity->num_backlinks = 0;
+}
+EXPORT_SYMBOL_GPL(__media_entity_remove_links);
+
+void media_entity_remove_links(struct media_entity *entity)
+{
+ struct media_device *mdev = entity->graph_obj.mdev;
+
+ /* Do nothing if the entity is not registered. */
+ if (mdev == NULL)
+ return;
+
+ mutex_lock(&mdev->graph_mutex);
+ __media_entity_remove_links(entity);
+ mutex_unlock(&mdev->graph_mutex);
+}
+EXPORT_SYMBOL_GPL(media_entity_remove_links);
+
+static int __media_entity_setup_link_notify(struct media_link *link, u32 flags)
+{
+ int ret;
+
+ /* Notify both entities. */
+ ret = media_entity_call(link->source->entity, link_setup,
+ link->source, link->sink, flags);
+ if (ret < 0 && ret != -ENOIOCTLCMD)
+ return ret;
+
+ ret = media_entity_call(link->sink->entity, link_setup,
+ link->sink, link->source, flags);
+ if (ret < 0 && ret != -ENOIOCTLCMD) {
+ media_entity_call(link->source->entity, link_setup,
+ link->source, link->sink, link->flags);
+ return ret;
+ }
+
+ link->flags = flags;
+ link->reverse->flags = link->flags;
+
+ return 0;
+}
+
+int __media_entity_setup_link(struct media_link *link, u32 flags)
+{
+ const u32 mask = MEDIA_LNK_FL_ENABLED;
+ struct media_device *mdev;
+ struct media_pad *source, *sink;
+ int ret = -EBUSY;
+
+ if (link == NULL)
+ return -EINVAL;
+
+	/* Only the MEDIA_LNK_FL_ENABLED flag is modifiable; reject other changes. */
+ if ((link->flags & ~mask) != (flags & ~mask))
+ return -EINVAL;
+
+ if (link->flags & MEDIA_LNK_FL_IMMUTABLE)
+ return link->flags == flags ? 0 : -EINVAL;
+
+ if (link->flags == flags)
+ return 0;
+
+ source = link->source;
+ sink = link->sink;
+
+ if (!(link->flags & MEDIA_LNK_FL_DYNAMIC) &&
+ (media_pad_is_streaming(source) || media_pad_is_streaming(sink)))
+ return -EBUSY;
+
+ mdev = source->graph_obj.mdev;
+
+ if (mdev->ops && mdev->ops->link_notify) {
+ ret = mdev->ops->link_notify(link, flags,
+ MEDIA_DEV_NOTIFY_PRE_LINK_CH);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = __media_entity_setup_link_notify(link, flags);
+
+ if (mdev->ops && mdev->ops->link_notify)
+ mdev->ops->link_notify(link, flags,
+ MEDIA_DEV_NOTIFY_POST_LINK_CH);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__media_entity_setup_link);
+
+int media_entity_setup_link(struct media_link *link, u32 flags)
+{
+ int ret;
+
+ mutex_lock(&link->graph_obj.mdev->graph_mutex);
+ ret = __media_entity_setup_link(link, flags);
+ mutex_unlock(&link->graph_obj.mdev->graph_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(media_entity_setup_link);
+
+struct media_link *
+media_entity_find_link(struct media_pad *source, struct media_pad *sink)
+{
+ struct media_link *link;
+
+ for_each_media_entity_data_link(source->entity, link) {
+ if (link->source->entity == source->entity &&
+ link->source->index == source->index &&
+ link->sink->entity == sink->entity &&
+ link->sink->index == sink->index)
+ return link;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(media_entity_find_link);
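+
+/*
+ * Example: the two helpers above combine naturally when toggling a
+ * link's state. Sketch, assuming 'source' and 'sink' pads exist and the
+ * caller does not hold the graph_mutex:
+ *
+ *    struct media_link *link = media_entity_find_link(source, sink);
+ *
+ *    if (!link)
+ *        return -ENOLINK;
+ *
+ *    ret = media_entity_setup_link(link,
+ *                                  link->flags | MEDIA_LNK_FL_ENABLED);
+ */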
+
+struct media_pad *media_pad_remote_pad_first(const struct media_pad *pad)
+{
+ struct media_link *link;
+
+ for_each_media_entity_data_link(pad->entity, link) {
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+ if (link->source == pad)
+ return link->sink;
+
+ if (link->sink == pad)
+ return link->source;
+ }
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(media_pad_remote_pad_first);
+
+struct media_pad *
+media_entity_remote_pad_unique(const struct media_entity *entity,
+ unsigned int type)
+{
+ struct media_pad *pad = NULL;
+ struct media_link *link;
+
+ list_for_each_entry(link, &entity->links, list) {
+ struct media_pad *local_pad;
+ struct media_pad *remote_pad;
+
+ if (((link->flags & MEDIA_LNK_FL_LINK_TYPE) !=
+ MEDIA_LNK_FL_DATA_LINK) ||
+ !(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+ if (type == MEDIA_PAD_FL_SOURCE) {
+ local_pad = link->sink;
+ remote_pad = link->source;
+ } else {
+ local_pad = link->source;
+ remote_pad = link->sink;
+ }
+
+ if (local_pad->entity == entity) {
+ if (pad)
+ return ERR_PTR(-ENOTUNIQ);
+
+ pad = remote_pad;
+ }
+ }
+
+ if (!pad)
+ return ERR_PTR(-ENOLINK);
+
+ return pad;
+}
+EXPORT_SYMBOL_GPL(media_entity_remote_pad_unique);
+
+struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad)
+{
+ struct media_pad *found_pad = NULL;
+ struct media_link *link;
+
+ list_for_each_entry(link, &pad->entity->links, list) {
+ struct media_pad *remote_pad;
+
+ if (!(link->flags & MEDIA_LNK_FL_ENABLED))
+ continue;
+
+ if (link->sink == pad)
+ remote_pad = link->source;
+ else if (link->source == pad)
+ remote_pad = link->sink;
+ else
+ continue;
+
+ if (found_pad)
+ return ERR_PTR(-ENOTUNIQ);
+
+ found_pad = remote_pad;
+ }
+
+ if (!found_pad)
+ return ERR_PTR(-ENOLINK);
+
+ return found_pad;
+}
+EXPORT_SYMBOL_GPL(media_pad_remote_pad_unique);
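+
+/*
+ * Example: a subdev in the middle of a pipeline can use this helper to
+ * locate its connected source. Sketch, assuming 'sd' is a subdev whose
+ * pad 0 is its sink pad:
+ *
+ *    struct media_pad *remote;
+ *
+ *    remote = media_pad_remote_pad_unique(&sd->entity.pads[0]);
+ *    if (IS_ERR(remote))
+ *        return PTR_ERR(remote);
+ */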
+
+int media_entity_get_fwnode_pad(struct media_entity *entity,
+ const struct fwnode_handle *fwnode,
+ unsigned long direction_flags)
+{
+ struct fwnode_endpoint endpoint;
+ unsigned int i;
+ int ret;
+
+ if (!entity->ops || !entity->ops->get_fwnode_pad) {
+ for (i = 0; i < entity->num_pads; i++) {
+ if (entity->pads[i].flags & direction_flags)
+ return i;
+ }
+
+ return -ENXIO;
+ }
+
+ ret = fwnode_graph_parse_endpoint(fwnode, &endpoint);
+ if (ret)
+ return ret;
+
+ ret = entity->ops->get_fwnode_pad(entity, &endpoint);
+ if (ret < 0)
+ return ret;
+
+ if (ret >= entity->num_pads)
+ return -ENXIO;
+
+ if (!(entity->pads[ret].flags & direction_flags))
+ return -ENXIO;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad);
+
+struct media_pipeline *media_entity_pipeline(struct media_entity *entity)
+{
+ struct media_pad *pad;
+
+ media_entity_for_each_pad(entity, pad) {
+ if (pad->pipe)
+ return pad->pipe;
+ }
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(media_entity_pipeline);
+
+struct media_pipeline *media_pad_pipeline(struct media_pad *pad)
+{
+ return pad->pipe;
+}
+EXPORT_SYMBOL_GPL(media_pad_pipeline);
+
+static void media_interface_init(struct media_device *mdev,
+ struct media_interface *intf,
+ u32 gobj_type,
+ u32 intf_type, u32 flags)
+{
+ intf->type = intf_type;
+ intf->flags = flags;
+ INIT_LIST_HEAD(&intf->links);
+
+ media_gobj_create(mdev, gobj_type, &intf->graph_obj);
+}
+
+/* Functions related to media interfaces backed by device nodes */
+
+struct media_intf_devnode *media_devnode_create(struct media_device *mdev,
+ u32 type, u32 flags,
+ u32 major, u32 minor)
+{
+ struct media_intf_devnode *devnode;
+
+ devnode = kzalloc(sizeof(*devnode), GFP_KERNEL);
+ if (!devnode)
+ return NULL;
+
+ devnode->major = major;
+ devnode->minor = minor;
+
+ media_interface_init(mdev, &devnode->intf, MEDIA_GRAPH_INTF_DEVNODE,
+ type, flags);
+
+ return devnode;
+}
+EXPORT_SYMBOL_GPL(media_devnode_create);
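+
+/*
+ * Example: registering a V4L2 video node interface and linking it to its
+ * entity. A sketch with the return value check on the interface link
+ * trimmed; 'vdev' is an assumed registered video_device:
+ *
+ *    struct media_intf_devnode *devnode;
+ *
+ *    devnode = media_devnode_create(mdev, MEDIA_INTF_T_V4L_VIDEO, 0,
+ *                                   VIDEO_MAJOR, vdev->minor);
+ *    if (!devnode)
+ *        return -ENOMEM;
+ *
+ *    media_create_intf_link(&vdev->entity, &devnode->intf,
+ *                           MEDIA_LNK_FL_ENABLED |
+ *                           MEDIA_LNK_FL_IMMUTABLE);
+ */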
+
+void media_devnode_remove(struct media_intf_devnode *devnode)
+{
+ media_remove_intf_links(&devnode->intf);
+ media_gobj_destroy(&devnode->intf.graph_obj);
+ kfree(devnode);
+}
+EXPORT_SYMBOL_GPL(media_devnode_remove);
+
+struct media_link *media_create_intf_link(struct media_entity *entity,
+ struct media_interface *intf,
+ u32 flags)
+{
+ struct media_link *link;
+
+ link = media_add_link(&intf->links);
+ if (link == NULL)
+ return NULL;
+
+ link->intf = intf;
+ link->entity = entity;
+ link->flags = flags | MEDIA_LNK_FL_INTERFACE_LINK;
+
+ /* Initialize graph object embedded at the new link */
+ media_gobj_create(intf->graph_obj.mdev, MEDIA_GRAPH_LINK,
+ &link->graph_obj);
+
+ return link;
+}
+EXPORT_SYMBOL_GPL(media_create_intf_link);
+
+void __media_remove_intf_link(struct media_link *link)
+{
+ list_del(&link->list);
+ media_gobj_destroy(&link->graph_obj);
+ kfree(link);
+}
+EXPORT_SYMBOL_GPL(__media_remove_intf_link);
+
+void media_remove_intf_link(struct media_link *link)
+{
+ struct media_device *mdev = link->graph_obj.mdev;
+
+ /* Do nothing if the intf is not registered. */
+ if (mdev == NULL)
+ return;
+
+ mutex_lock(&mdev->graph_mutex);
+ __media_remove_intf_link(link);
+ mutex_unlock(&mdev->graph_mutex);
+}
+EXPORT_SYMBOL_GPL(media_remove_intf_link);
+
+void __media_remove_intf_links(struct media_interface *intf)
+{
+ struct media_link *link, *tmp;
+
+ list_for_each_entry_safe(link, tmp, &intf->links, list)
+ __media_remove_intf_link(link);
+}
+EXPORT_SYMBOL_GPL(__media_remove_intf_links);
+
+void media_remove_intf_links(struct media_interface *intf)
+{
+ struct media_device *mdev = intf->graph_obj.mdev;
+
+ /* Do nothing if the intf is not registered. */
+ if (mdev == NULL)
+ return;
+
+ mutex_lock(&mdev->graph_mutex);
+ __media_remove_intf_links(intf);
+ mutex_unlock(&mdev->graph_mutex);
+}
+EXPORT_SYMBOL_GPL(media_remove_intf_links);
+
+struct media_link *media_create_ancillary_link(struct media_entity *primary,
+ struct media_entity *ancillary)
+{
+ struct media_link *link;
+
+ link = media_add_link(&primary->links);
+ if (!link)
+ return ERR_PTR(-ENOMEM);
+
+ link->gobj0 = &primary->graph_obj;
+ link->gobj1 = &ancillary->graph_obj;
+ link->flags = MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED |
+ MEDIA_LNK_FL_ANCILLARY_LINK;
+
+ /* Initialize graph object embedded in the new link */
+ media_gobj_create(primary->graph_obj.mdev, MEDIA_GRAPH_LINK,
+ &link->graph_obj);
+
+ return link;
+}
+EXPORT_SYMBOL_GPL(media_create_ancillary_link);
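+
+/*
+ * Example: ancillary links tie closely coupled entities together, such
+ * as a camera sensor and its lens controller. Sketch, with 'sensor' and
+ * 'lens' assumed to be objects embedding registered entities:
+ *
+ *    struct media_link *link;
+ *
+ *    link = media_create_ancillary_link(&sensor->entity, &lens->entity);
+ *    if (IS_ERR(link))
+ *        return PTR_ERR(link);
+ */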
+
+struct media_link *__media_entity_next_link(struct media_entity *entity,
+ struct media_link *link,
+ unsigned long link_type)
+{
+ link = link ? list_next_entry(link, list)
+ : list_first_entry(&entity->links, typeof(*link), list);
+
+ list_for_each_entry_from(link, &entity->links, list)
+ if ((link->flags & MEDIA_LNK_FL_LINK_TYPE) == link_type)
+ return link;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(__media_entity_next_link);
diff --git a/drivers/media/mc/mc-request.c b/drivers/media/mc/mc-request.c
new file mode 100644
index 0000000000..addb8f2d89
--- /dev/null
+++ b/drivers/media/mc/mc-request.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Media device request objects
+ *
+ * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ * Copyright (C) 2018 Intel Corporation
+ * Copyright (C) 2018 Google, Inc.
+ *
+ * Author: Hans Verkuil <hans.verkuil@cisco.com>
+ * Author: Sakari Ailus <sakari.ailus@linux.intel.com>
+ */
+
+#include <linux/anon_inodes.h>
+#include <linux/file.h>
+#include <linux/refcount.h>
+
+#include <media/media-device.h>
+#include <media/media-request.h>
+
+static const char * const request_state[] = {
+ [MEDIA_REQUEST_STATE_IDLE] = "idle",
+ [MEDIA_REQUEST_STATE_VALIDATING] = "validating",
+ [MEDIA_REQUEST_STATE_QUEUED] = "queued",
+ [MEDIA_REQUEST_STATE_COMPLETE] = "complete",
+ [MEDIA_REQUEST_STATE_CLEANING] = "cleaning",
+ [MEDIA_REQUEST_STATE_UPDATING] = "updating",
+};
+
+static const char *
+media_request_state_str(enum media_request_state state)
+{
+ BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE);
+
+ if (WARN_ON(state >= ARRAY_SIZE(request_state)))
+ return "invalid";
+ return request_state[state];
+}
+
+static void media_request_clean(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+
+ /* Just a sanity check. No other code path is allowed to change this. */
+ WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING);
+ WARN_ON(req->updating_count);
+ WARN_ON(req->access_count);
+
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list) {
+ media_request_object_unbind(obj);
+ media_request_object_put(obj);
+ }
+
+ req->updating_count = 0;
+ req->access_count = 0;
+ WARN_ON(req->num_incomplete_objects);
+ req->num_incomplete_objects = 0;
+ wake_up_interruptible_all(&req->poll_wait);
+}
+
+static void media_request_release(struct kref *kref)
+{
+ struct media_request *req =
+ container_of(kref, struct media_request, kref);
+ struct media_device *mdev = req->mdev;
+
+ dev_dbg(mdev->dev, "request: release %s\n", req->debug_str);
+
+ /* No other users, no need for a spinlock */
+ req->state = MEDIA_REQUEST_STATE_CLEANING;
+
+ media_request_clean(req);
+
+ if (mdev->ops->req_free)
+ mdev->ops->req_free(req);
+ else
+ kfree(req);
+}
+
+void media_request_put(struct media_request *req)
+{
+ kref_put(&req->kref, media_request_release);
+}
+EXPORT_SYMBOL_GPL(media_request_put);
+
+static int media_request_close(struct inode *inode, struct file *filp)
+{
+ struct media_request *req = filp->private_data;
+
+ media_request_put(req);
+ return 0;
+}
+
+static __poll_t media_request_poll(struct file *filp,
+ struct poll_table_struct *wait)
+{
+ struct media_request *req = filp->private_data;
+ unsigned long flags;
+ __poll_t ret = 0;
+
+ if (!(poll_requested_events(wait) & EPOLLPRI))
+ return 0;
+
+ poll_wait(filp, &req->poll_wait, wait);
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_COMPLETE) {
+ ret = EPOLLPRI;
+ goto unlock;
+ }
+	if (req->state != MEDIA_REQUEST_STATE_QUEUED)
+		ret = EPOLLERR;
+
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ return ret;
+}
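+
+/*
+ * Example: userspace observes request completion by polling the request
+ * fd for exceptional events. A sketch in plain C, error handling
+ * omitted; handle_completed_request() is a placeholder:
+ *
+ *    struct pollfd pfd = { .fd = req_fd, .events = POLLPRI };
+ *
+ *    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLPRI))
+ *        handle_completed_request();
+ */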
+
+static long media_request_ioctl_queue(struct media_request *req)
+{
+ struct media_device *mdev = req->mdev;
+ enum media_request_state state;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str);
+
+ /*
+	 * Ensure the request that is validated will be the one that gets queued
+	 * next, by serializing the queueing process. This mutex is also used
+	 * to serialize with canceling a vb2 queue and with setting values such
+ * as controls in a request.
+ */
+ mutex_lock(&mdev->req_queue_mutex);
+
+ media_request_get(req);
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state == MEDIA_REQUEST_STATE_IDLE)
+ req->state = MEDIA_REQUEST_STATE_VALIDATING;
+ state = req->state;
+ spin_unlock_irqrestore(&req->lock, flags);
+ if (state != MEDIA_REQUEST_STATE_VALIDATING) {
+ dev_dbg(mdev->dev,
+ "request: unable to queue %s, request in state %s\n",
+ req->debug_str, media_request_state_str(state));
+ media_request_put(req);
+ mutex_unlock(&mdev->req_queue_mutex);
+ return -EBUSY;
+ }
+
+ ret = mdev->ops->req_validate(req);
+
+	/*
+	 * If req_validate was successful, mark the state as QUEUED and call
+	 * req_queue. The state is set first so that req_queue can unbind or
+	 * complete the queued objects in case they are immediately
+	 * 'consumed'. The state can only change from QUEUED to another state
+	 * if either the driver changes it or the user cancels the vb2 queue.
+	 * The driver can only change the state after each object is queued
+	 * through the req_queue op (and that op cannot fail), so setting the
+	 * state to QUEUED up front is safe.
+	 *
+	 * The other way the state can change is when the vb2 queue is
+	 * canceled; that path takes the req_queue_mutex, which is still held
+	 * while req_queue is called, so that case is safe as well.
+	 */
+ spin_lock_irqsave(&req->lock, flags);
+ req->state = ret ? MEDIA_REQUEST_STATE_IDLE
+ : MEDIA_REQUEST_STATE_QUEUED;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ if (!ret)
+ mdev->ops->req_queue(req);
+
+ mutex_unlock(&mdev->req_queue_mutex);
+
+ if (ret) {
+ dev_dbg(mdev->dev, "request: can't queue %s (%d)\n",
+ req->debug_str, ret);
+ media_request_put(req);
+ }
+
+ return ret;
+}
+
+static long media_request_ioctl_reinit(struct media_request *req)
+{
+ struct media_device *mdev = req->mdev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (req->state != MEDIA_REQUEST_STATE_IDLE &&
+ req->state != MEDIA_REQUEST_STATE_COMPLETE) {
+ dev_dbg(mdev->dev,
+ "request: %s not in idle or complete state, cannot reinit\n",
+ req->debug_str);
+ spin_unlock_irqrestore(&req->lock, flags);
+ return -EBUSY;
+ }
+ if (req->access_count) {
+ dev_dbg(mdev->dev,
+ "request: %s is being accessed, cannot reinit\n",
+ req->debug_str);
+ spin_unlock_irqrestore(&req->lock, flags);
+ return -EBUSY;
+ }
+ req->state = MEDIA_REQUEST_STATE_CLEANING;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ media_request_clean(req);
+
+ spin_lock_irqsave(&req->lock, flags);
+ req->state = MEDIA_REQUEST_STATE_IDLE;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return 0;
+}
+
+static long media_request_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct media_request *req = filp->private_data;
+
+ switch (cmd) {
+ case MEDIA_REQUEST_IOC_QUEUE:
+ return media_request_ioctl_queue(req);
+ case MEDIA_REQUEST_IOC_REINIT:
+ return media_request_ioctl_reinit(req);
+ default:
+ return -ENOIOCTLCMD;
+ }
+}
+
+static const struct file_operations request_fops = {
+ .owner = THIS_MODULE,
+ .poll = media_request_poll,
+ .unlocked_ioctl = media_request_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = media_request_ioctl,
+#endif /* CONFIG_COMPAT */
+ .release = media_request_close,
+};
+
+struct media_request *
+media_request_get_by_fd(struct media_device *mdev, int request_fd)
+{
+ struct fd f;
+ struct media_request *req;
+
+ if (!mdev || !mdev->ops ||
+ !mdev->ops->req_validate || !mdev->ops->req_queue)
+ return ERR_PTR(-EBADR);
+
+ f = fdget(request_fd);
+ if (!f.file)
+ goto err_no_req_fd;
+
+ if (f.file->f_op != &request_fops)
+ goto err_fput;
+ req = f.file->private_data;
+ if (req->mdev != mdev)
+ goto err_fput;
+
+ /*
+ * Note: as long as someone has an open filehandle of the request,
+ * the request can never be released. The fdget() above ensures that
+ * even if userspace closes the request filehandle, the release()
+ * fop won't be called, so the media_request_get() always succeeds
+ * and there is no race condition where the request was released
+ * before media_request_get() is called.
+ */
+ media_request_get(req);
+ fdput(f);
+
+ return req;
+
+err_fput:
+ fdput(f);
+
+err_no_req_fd:
+ dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd);
+ return ERR_PTR(-EINVAL);
+}
+EXPORT_SYMBOL_GPL(media_request_get_by_fd);
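+
+/*
+ * Example: drivers resolve a request_fd received from userspace (e.g.
+ * through a V4L2 buffer) with this helper and must balance it with a
+ * put. Sketch:
+ *
+ *    struct media_request *req;
+ *
+ *    req = media_request_get_by_fd(mdev, request_fd);
+ *    if (IS_ERR(req))
+ *        return PTR_ERR(req);
+ *    ...
+ *    media_request_put(req);
+ */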
+
+int media_request_alloc(struct media_device *mdev, int *alloc_fd)
+{
+ struct media_request *req;
+ struct file *filp;
+ int fd;
+ int ret;
+
+ /* Either both are NULL or both are non-NULL */
+ if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free))
+ return -ENOMEM;
+
+ if (mdev->ops->req_alloc)
+ req = mdev->ops->req_alloc(mdev);
+ else
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+ if (fd < 0) {
+ ret = fd;
+ goto err_free_req;
+ }
+
+ filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC);
+ if (IS_ERR(filp)) {
+ ret = PTR_ERR(filp);
+ goto err_put_fd;
+ }
+
+ filp->private_data = req;
+ req->mdev = mdev;
+ req->state = MEDIA_REQUEST_STATE_IDLE;
+ req->num_incomplete_objects = 0;
+ kref_init(&req->kref);
+ INIT_LIST_HEAD(&req->objects);
+ spin_lock_init(&req->lock);
+ init_waitqueue_head(&req->poll_wait);
+ req->updating_count = 0;
+ req->access_count = 0;
+
+ *alloc_fd = fd;
+
+ snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d",
+ atomic_inc_return(&mdev->request_id), fd);
+ dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str);
+
+ fd_install(fd, filp);
+
+ return 0;
+
+err_put_fd:
+ put_unused_fd(fd);
+
+err_free_req:
+ if (mdev->ops->req_free)
+ mdev->ops->req_free(req);
+ else
+ kfree(req);
+
+ return ret;
+}
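+
+/*
+ * Example: the corresponding userspace flow allocates a request on the
+ * media device, queues it, and recycles it with the UAPI ioctls from
+ * linux/media.h. A sketch with error handling omitted:
+ *
+ *    int req_fd;
+ *
+ *    ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd);
+ *    ... set controls and queue buffers against req_fd ...
+ *    ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE);
+ *    ... wait for completion, e.g. with poll() ...
+ *    ioctl(req_fd, MEDIA_REQUEST_IOC_REINIT);
+ */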
+
+static void media_request_object_release(struct kref *kref)
+{
+ struct media_request_object *obj =
+ container_of(kref, struct media_request_object, kref);
+ struct media_request *req = obj->req;
+
+ if (WARN_ON(req))
+ media_request_object_unbind(obj);
+ obj->ops->release(obj);
+}
+
+struct media_request_object *
+media_request_object_find(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv)
+{
+ struct media_request_object *obj;
+ struct media_request_object *found = NULL;
+ unsigned long flags;
+
+ if (WARN_ON(!ops || !priv))
+ return NULL;
+
+ spin_lock_irqsave(&req->lock, flags);
+ list_for_each_entry(obj, &req->objects, list) {
+ if (obj->ops == ops && obj->priv == priv) {
+ media_request_object_get(obj);
+ found = obj;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&req->lock, flags);
+ return found;
+}
+EXPORT_SYMBOL_GPL(media_request_object_find);
+
+void media_request_object_put(struct media_request_object *obj)
+{
+ kref_put(&obj->kref, media_request_object_release);
+}
+EXPORT_SYMBOL_GPL(media_request_object_put);
+
+void media_request_object_init(struct media_request_object *obj)
+{
+ obj->ops = NULL;
+ obj->req = NULL;
+ obj->priv = NULL;
+ obj->completed = false;
+ INIT_LIST_HEAD(&obj->list);
+ kref_init(&obj->kref);
+}
+EXPORT_SYMBOL_GPL(media_request_object_init);
+
+int media_request_object_bind(struct media_request *req,
+ const struct media_request_object_ops *ops,
+ void *priv, bool is_buffer,
+ struct media_request_object *obj)
+{
+ unsigned long flags;
+ int ret = -EBUSY;
+
+ if (WARN_ON(!ops->release))
+ return -EBADR;
+
+ spin_lock_irqsave(&req->lock, flags);
+
+ if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING &&
+ req->state != MEDIA_REQUEST_STATE_QUEUED))
+ goto unlock;
+
+ obj->req = req;
+ obj->ops = ops;
+ obj->priv = priv;
+
+ if (is_buffer)
+ list_add_tail(&obj->list, &req->objects);
+ else
+ list_add(&obj->list, &req->objects);
+ req->num_incomplete_objects++;
+ ret = 0;
+
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(media_request_object_bind);
+
+void media_request_object_unbind(struct media_request_object *obj)
+{
+ struct media_request *req = obj->req;
+ unsigned long flags;
+ bool completed = false;
+
+ if (WARN_ON(!req))
+ return;
+
+ spin_lock_irqsave(&req->lock, flags);
+ list_del(&obj->list);
+ obj->req = NULL;
+
+ if (req->state == MEDIA_REQUEST_STATE_COMPLETE)
+ goto unlock;
+
+ if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING))
+ goto unlock;
+
+ if (req->state == MEDIA_REQUEST_STATE_CLEANING) {
+ if (!obj->completed)
+ req->num_incomplete_objects--;
+ goto unlock;
+ }
+
+ if (WARN_ON(!req->num_incomplete_objects))
+ goto unlock;
+
+ req->num_incomplete_objects--;
+ if (req->state == MEDIA_REQUEST_STATE_QUEUED &&
+ !req->num_incomplete_objects) {
+ req->state = MEDIA_REQUEST_STATE_COMPLETE;
+ completed = true;
+ wake_up_interruptible_all(&req->poll_wait);
+ }
+
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ if (obj->ops->unbind)
+ obj->ops->unbind(obj);
+ if (completed)
+ media_request_put(req);
+}
+EXPORT_SYMBOL_GPL(media_request_object_unbind);
+
+void media_request_object_complete(struct media_request_object *obj)
+{
+ struct media_request *req = obj->req;
+ unsigned long flags;
+ bool completed = false;
+
+ spin_lock_irqsave(&req->lock, flags);
+ if (obj->completed)
+ goto unlock;
+ obj->completed = true;
+ if (WARN_ON(!req->num_incomplete_objects) ||
+ WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED))
+ goto unlock;
+
+ if (!--req->num_incomplete_objects) {
+ req->state = MEDIA_REQUEST_STATE_COMPLETE;
+ wake_up_interruptible_all(&req->poll_wait);
+ completed = true;
+ }
+unlock:
+ spin_unlock_irqrestore(&req->lock, flags);
+ if (completed)
+ media_request_put(req);
+}
+EXPORT_SYMBOL_GPL(media_request_object_complete);