author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4
tree      848558de17fb3008cdf4d861b01ac7781903ce39  /drivers/media/common/videobuf2
parent    Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/media/common/videobuf2')
-rw-r--r--  drivers/media/common/videobuf2/Kconfig                     32
-rw-r--r--  drivers/media/common/videobuf2/Makefile                    17
-rw-r--r--  drivers/media/common/videobuf2/frame_vector.c             193
-rw-r--r--  drivers/media/common/videobuf2/vb2-trace.c                 10
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-core.c          3082
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-dma-contig.c     872
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-dma-sg.c         686
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-dvb.c            341
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-memops.c         129
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-v4l2.c          1331
-rw-r--r--  drivers/media/common/videobuf2/videobuf2-vmalloc.c        450
11 files changed, 7143 insertions, 0 deletions
diff --git a/drivers/media/common/videobuf2/Kconfig b/drivers/media/common/videobuf2/Kconfig
new file mode 100644
index 000000000..d2223a12c
--- /dev/null
+++ b/drivers/media/common/videobuf2/Kconfig
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Used by drivers that need Videobuf2 modules
+config VIDEOBUF2_CORE
+ select DMA_SHARED_BUFFER
+ tristate
+
+config VIDEOBUF2_V4L2
+ tristate
+
+config VIDEOBUF2_MEMOPS
+ tristate
+
+config VIDEOBUF2_DMA_CONTIG
+ tristate
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
+
+config VIDEOBUF2_VMALLOC
+ tristate
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+ select DMA_SHARED_BUFFER
+
+config VIDEOBUF2_DMA_SG
+ tristate
+ select VIDEOBUF2_CORE
+ select VIDEOBUF2_MEMOPS
+
+config VIDEOBUF2_DVB
+ tristate
+ select VIDEOBUF2_CORE
diff --git a/drivers/media/common/videobuf2/Makefile b/drivers/media/common/videobuf2/Makefile
new file mode 100644
index 000000000..a6fe3f304
--- /dev/null
+++ b/drivers/media/common/videobuf2/Makefile
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+videobuf2-common-objs := videobuf2-core.o
+videobuf2-common-objs += frame_vector.o
+
+ifeq ($(CONFIG_TRACEPOINTS),y)
+ videobuf2-common-objs += vb2-trace.o
+endif
+
+# Please keep it alphabetically sorted by Kconfig name
+# (e.g. LC_ALL=C sort Makefile)
+obj-$(CONFIG_VIDEOBUF2_CORE) += videobuf2-common.o
+obj-$(CONFIG_VIDEOBUF2_DMA_CONTIG) += videobuf2-dma-contig.o
+obj-$(CONFIG_VIDEOBUF2_DMA_SG) += videobuf2-dma-sg.o
+obj-$(CONFIG_VIDEOBUF2_DVB) += videobuf2-dvb.o
+obj-$(CONFIG_VIDEOBUF2_MEMOPS) += videobuf2-memops.o
+obj-$(CONFIG_VIDEOBUF2_V4L2) += videobuf2-v4l2.o
+obj-$(CONFIG_VIDEOBUF2_VMALLOC) += videobuf2-vmalloc.o
diff --git a/drivers/media/common/videobuf2/frame_vector.c b/drivers/media/common/videobuf2/frame_vector.c
new file mode 100644
index 000000000..07ebe4424
--- /dev/null
+++ b/drivers/media/common/videobuf2/frame_vector.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/sched.h>
+
+#include <media/frame_vector.h>
+
+/**
+ * get_vaddr_frames() - map virtual addresses to pfns
+ * @start: starting user address
+ * @nr_frames: number of pages / pfns from start to map
+ * @vec: structure which receives pages / pfns of the addresses mapped.
+ * It should have space for at least nr_frames entries.
+ *
+ * This function maps virtual addresses from @start and fills @vec structure
+ * with page frame numbers or page pointers to corresponding pages (choice
+ * depends on the type of the vma underlying the virtual address). If @start
+ * belongs to a normal vma, the function grabs a reference to each of the
+ * pages to pin them in memory. If @start belongs to a VM_IO | VM_PFNMAP vma,
+ * page structures are not touched and the caller must make sure pfns aren't
+ * reused for anything else while they are in use.
+ *
+ * The function returns the number of pages mapped, which may be less than
+ * @nr_frames. In particular, mapping stops when a vma of a different type
+ * underlies the specified range of virtual addresses.
+ * When the function isn't able to map a single page, it returns an error.
+ *
+ * Note that get_vaddr_frames() cannot follow VM_IO mappings. It used
+ * to be able to do that, but that could (racily) return non-refcounted
+ * pfns.
+ *
+ * This function takes care of grabbing mmap_lock as necessary.
+ */
+int get_vaddr_frames(unsigned long start, unsigned int nr_frames,
+ struct frame_vector *vec)
+{
+ int ret;
+
+ if (nr_frames == 0)
+ return 0;
+
+ if (WARN_ON_ONCE(nr_frames > vec->nr_allocated))
+ nr_frames = vec->nr_allocated;
+
+ start = untagged_addr(start);
+
+ ret = pin_user_pages_fast(start, nr_frames,
+ FOLL_FORCE | FOLL_WRITE | FOLL_LONGTERM,
+ (struct page **)(vec->ptrs));
+ vec->got_ref = true;
+ vec->is_pfns = false;
+ vec->nr_frames = ret;
+
+ if (likely(ret > 0))
+ return ret;
+
+ vec->nr_frames = 0;
+ return ret ? ret : -EFAULT;
+}
+EXPORT_SYMBOL(get_vaddr_frames);
+
+/**
+ * put_vaddr_frames() - drop references to pages if get_vaddr_frames() acquired
+ * them
+ * @vec: frame vector to put
+ *
+ * Drop references to pages if get_vaddr_frames() acquired them. We also
+ * invalidate the frame vector so that it is prepared for the next call into
+ * get_vaddr_frames().
+ */
+void put_vaddr_frames(struct frame_vector *vec)
+{
+ struct page **pages;
+
+ if (!vec->got_ref)
+ goto out;
+ pages = frame_vector_pages(vec);
+ /*
+ * frame_vector_pages() might have needed to do a conversion when
+ * get_vaddr_frames() got pages but vec was later converted to pfns.
+ * But it shouldn't really fail to convert pfns back...
+ */
+ if (WARN_ON(IS_ERR(pages)))
+ goto out;
+
+ unpin_user_pages(pages, vec->nr_frames);
+ vec->got_ref = false;
+out:
+ vec->nr_frames = 0;
+}
+EXPORT_SYMBOL(put_vaddr_frames);
+
+/**
+ * frame_vector_to_pages - convert frame vector to contain page pointers
+ * @vec: frame vector to convert
+ *
+ * Convert @vec to contain an array of page pointers. If the conversion is
+ * successful, return 0. Otherwise return an error. Note that we do not grab
+ * page references for the page structures.
+ */
+int frame_vector_to_pages(struct frame_vector *vec)
+{
+ int i;
+ unsigned long *nums;
+ struct page **pages;
+
+ if (!vec->is_pfns)
+ return 0;
+ nums = frame_vector_pfns(vec);
+ for (i = 0; i < vec->nr_frames; i++)
+ if (!pfn_valid(nums[i]))
+ return -EINVAL;
+ pages = (struct page **)nums;
+ for (i = 0; i < vec->nr_frames; i++)
+ pages[i] = pfn_to_page(nums[i]);
+ vec->is_pfns = false;
+ return 0;
+}
+EXPORT_SYMBOL(frame_vector_to_pages);
+
+/**
+ * frame_vector_to_pfns - convert frame vector to contain pfns
+ * @vec: frame vector to convert
+ *
+ * Convert @vec to contain an array of pfns.
+ */
+void frame_vector_to_pfns(struct frame_vector *vec)
+{
+ int i;
+ unsigned long *nums;
+ struct page **pages;
+
+ if (vec->is_pfns)
+ return;
+ pages = (struct page **)(vec->ptrs);
+ nums = (unsigned long *)pages;
+ for (i = 0; i < vec->nr_frames; i++)
+ nums[i] = page_to_pfn(pages[i]);
+ vec->is_pfns = true;
+}
+EXPORT_SYMBOL(frame_vector_to_pfns);
+
+/**
+ * frame_vector_create() - allocate & initialize structure for pinned pfns
+ * @nr_frames: number of pfn slots we should reserve
+ *
+ * Allocate and initialize a struct frame_vector able to hold @nr_frames
+ * pfns.
+ */
+struct frame_vector *frame_vector_create(unsigned int nr_frames)
+{
+ struct frame_vector *vec;
+ int size = sizeof(struct frame_vector) + sizeof(void *) * nr_frames;
+
+ if (WARN_ON_ONCE(nr_frames == 0))
+ return NULL;
+ /*
+ * This is absurdly high. It's here just to avoid strange effects when
+ * the arithmetic overflows.
+ */
+ if (WARN_ON_ONCE(nr_frames > INT_MAX / sizeof(void *) / 2))
+ return NULL;
+ /*
+ * Avoid higher-order allocations: kvmalloc() falls back to vmalloc()
+ * for larger sizes, which should be rare anyway.
+ */
+ vec = kvmalloc(size, GFP_KERNEL);
+ if (!vec)
+ return NULL;
+ vec->nr_allocated = nr_frames;
+ vec->nr_frames = 0;
+ return vec;
+}
+EXPORT_SYMBOL(frame_vector_create);
+
+/**
+ * frame_vector_destroy() - free memory allocated to carry frame vector
+ * @vec: Frame vector to free
+ *
+ * Free structure allocated by frame_vector_create() to carry frames.
+ */
+void frame_vector_destroy(struct frame_vector *vec)
+{
+ /* Make sure put_vaddr_frames() got called properly... */
+ VM_BUG_ON(vec->nr_frames > 0);
+ kvfree(vec);
+}
+EXPORT_SYMBOL(frame_vector_destroy);
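
Taken together, the functions above form a small pin/unpin API around pin_user_pages_fast(). The following is a minimal usage sketch from a hypothetical caller (example_pin_user_buffer() is not part of this patch), showing the intended call order and the return-value contract documented on get_vaddr_frames(): create the vector, pin, convert to page pointers, use the pages, then release and destroy.

#include <linux/err.h>
#include <linux/errno.h>
#include <media/frame_vector.h>

/* Hypothetical caller: pin @nr frames of a userspace buffer at @uaddr. */
static int example_pin_user_buffer(unsigned long uaddr, unsigned int nr)
{
	struct frame_vector *vec;
	struct page **pages;
	int ret;

	vec = frame_vector_create(nr);
	if (!vec)
		return -ENOMEM;

	ret = get_vaddr_frames(uaddr, nr, vec);
	if (ret < 0)
		goto out_destroy;
	/* get_vaddr_frames() may pin fewer than @nr frames; reject that here */
	if (ret != nr) {
		ret = -EFAULT;
		goto out_put;
	}

	pages = frame_vector_pages(vec);	/* converts pfns back if needed */
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put;
	}
	/* ... map 'pages' for DMA here ... */
	ret = 0;

out_put:
	put_vaddr_frames(vec);
out_destroy:
	frame_vector_destroy(vec);
	return ret;
}
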
diff --git a/drivers/media/common/videobuf2/vb2-trace.c b/drivers/media/common/videobuf2/vb2-trace.c
new file mode 100644
index 000000000..4c0f39d27
--- /dev/null
+++ b/drivers/media/common/videobuf2/vb2-trace.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <media/videobuf2-core.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/vb2.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_done);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_buf_queue);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_dqbuf);
+EXPORT_TRACEPOINT_SYMBOL_GPL(vb2_qbuf);
diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c
new file mode 100644
index 000000000..92efc4676
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-core.c
@@ -0,0 +1,3082 @@
+/*
+ * videobuf2-core.c - video buffer 2 core framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * The vb2_thread implementation was based on code from videobuf-dvb.c:
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/freezer.h>
+#include <linux/kthread.h>
+
+#include <media/videobuf2-core.h>
+#include <media/v4l2-mc.h>
+
+#include <trace/events/vb2.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(q, level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ pr_info("[%s] %s: " fmt, (q)->name, __func__, \
+ ## arg); \
+ } while (0)
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+
+/*
+ * If advanced debugging is on, then count how often each op is called
+ * successfully; the counters are kept either per-buffer or per-queue.
+ *
+ * This makes it easy to check that the 'init' and 'cleanup'
+ * (and variations thereof) stay balanced.
+ */
+
+#define log_memop(vb, op) \
+ dprintk((vb)->vb2_queue, 2, "call_memop(%d, %s)%s\n", \
+ (vb)->index, #op, \
+ (vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
+
+#define call_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ int err; \
+ \
+ log_memop(vb, op); \
+ err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_mem_ ## op++; \
+ err; \
+})
+
+#define call_ptr_memop(op, vb, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ void *ptr; \
+ \
+ log_memop(vb, op); \
+ ptr = _q->mem_ops->op ? _q->mem_ops->op(vb, args) : NULL; \
+ if (!IS_ERR_OR_NULL(ptr)) \
+ (vb)->cnt_mem_ ## op++; \
+ ptr; \
+})
+
+#define call_void_memop(vb, op, args...) \
+({ \
+ struct vb2_queue *_q = (vb)->vb2_queue; \
+ \
+ log_memop(vb, op); \
+ if (_q->mem_ops->op) \
+ _q->mem_ops->op(args); \
+ (vb)->cnt_mem_ ## op++; \
+})
+
+#define log_qop(q, op) \
+ dprintk(q, 2, "call_qop(%s)%s\n", #op, \
+ (q)->ops->op ? "" : " (nop)")
+
+#define call_qop(q, op, args...) \
+({ \
+ int err; \
+ \
+ log_qop(q, op); \
+ err = (q)->ops->op ? (q)->ops->op(args) : 0; \
+ if (!err) \
+ (q)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_qop(q, op, args...) \
+({ \
+ log_qop(q, op); \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ (q)->cnt_ ## op++; \
+})
+
+#define log_vb_qop(vb, op, args...) \
+ dprintk((vb)->vb2_queue, 2, "call_vb_qop(%d, %s)%s\n", \
+ (vb)->index, #op, \
+ (vb)->vb2_queue->ops->op ? "" : " (nop)")
+
+#define call_vb_qop(vb, op, args...) \
+({ \
+ int err; \
+ \
+ log_vb_qop(vb, op); \
+ err = (vb)->vb2_queue->ops->op ? \
+ (vb)->vb2_queue->ops->op(args) : 0; \
+ if (!err) \
+ (vb)->cnt_ ## op++; \
+ err; \
+})
+
+#define call_void_vb_qop(vb, op, args...) \
+({ \
+ log_vb_qop(vb, op); \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ (vb)->cnt_ ## op++; \
+})
+
+#else
+
+#define call_memop(vb, op, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(args) : 0)
+
+#define call_ptr_memop(op, vb, args...) \
+ ((vb)->vb2_queue->mem_ops->op ? \
+ (vb)->vb2_queue->mem_ops->op(vb, args) : NULL)
+
+#define call_void_memop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->mem_ops->op) \
+ (vb)->vb2_queue->mem_ops->op(args); \
+ } while (0)
+
+#define call_qop(q, op, args...) \
+ ((q)->ops->op ? (q)->ops->op(args) : 0)
+
+#define call_void_qop(q, op, args...) \
+ do { \
+ if ((q)->ops->op) \
+ (q)->ops->op(args); \
+ } while (0)
+
+#define call_vb_qop(vb, op, args...) \
+ ((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
+
+#define call_void_vb_qop(vb, op, args...) \
+ do { \
+ if ((vb)->vb2_queue->ops->op) \
+ (vb)->vb2_queue->ops->op(args); \
+ } while (0)
+
+#endif
+
+#define call_bufop(q, op, args...) \
+({ \
+ int ret = 0; \
+ if (q && q->buf_ops && q->buf_ops->op) \
+ ret = q->buf_ops->op(args); \
+ ret; \
+})
+
+#define call_void_bufop(q, op, args...) \
+({ \
+ if (q && q->buf_ops && q->buf_ops->op) \
+ q->buf_ops->op(args); \
+})
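
All of the call_*() wrappers above implement the same convention: the op tables (q->ops, q->mem_ops, q->buf_ops) hold optional function pointers, a missing op behaves as a successful no-op, and the CONFIG_VIDEO_ADV_DEBUG variants additionally count successful calls so that paired ops (init/cleanup, prepare/finish) can later be checked for balance. Below is a freestanding, compilable sketch of that dispatch pattern, using illustrative names rather than kernel API.

#include <stdio.h>

struct example_ops {
	int (*prepare)(int arg);	/* optional */
	void (*finish)(int arg);	/* optional */
};

/* Missing op counts as success, like call_qop()/call_memop() above. */
#define example_call_op(ops, op, args...) \
	((ops)->op ? (ops)->op(args) : 0)

#define example_call_void_op(ops, op, args...) \
	do { \
		if ((ops)->op) \
			(ops)->op(args); \
	} while (0)

static int my_prepare(int arg)
{
	printf("prepare(%d)\n", arg);
	return 0;
}

int main(void)
{
	struct example_ops ops = { .prepare = my_prepare }; /* no .finish */
	int err;

	err = example_call_op(&ops, prepare, 42);	/* calls my_prepare() */
	example_call_void_op(&ops, finish, 42);		/* silently skipped */
	return err;
}
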
+
+static void __vb2_queue_cancel(struct vb2_queue *q);
+static void __enqueue_in_driver(struct vb2_buffer *vb);
+
+static const char *vb2_state_name(enum vb2_buffer_state s)
+{
+ static const char * const state_names[] = {
+ [VB2_BUF_STATE_DEQUEUED] = "dequeued",
+ [VB2_BUF_STATE_IN_REQUEST] = "in request",
+ [VB2_BUF_STATE_PREPARING] = "preparing",
+ [VB2_BUF_STATE_QUEUED] = "queued",
+ [VB2_BUF_STATE_ACTIVE] = "active",
+ [VB2_BUF_STATE_DONE] = "done",
+ [VB2_BUF_STATE_ERROR] = "error",
+ };
+
+ if ((unsigned int)(s) < ARRAY_SIZE(state_names))
+ return state_names[s];
+ return "unknown";
+}
+
+/*
+ * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
+ */
+static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ int plane;
+ int ret = -ENOMEM;
+
+ /*
+ * Allocate memory for all planes in this buffer
+ * NOTE: mmapped areas should be page aligned
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ /* Memops alloc requires size to be page aligned. */
+ unsigned long size = PAGE_ALIGN(vb->planes[plane].length);
+
+ /* Did it wrap around? */
+ if (size < vb->planes[plane].length)
+ goto free;
+
+ mem_priv = call_ptr_memop(alloc,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ size);
+ if (IS_ERR_OR_NULL(mem_priv)) {
+ if (mem_priv)
+ ret = PTR_ERR(mem_priv);
+ goto free;
+ }
+
+ /* Associate allocator private data with this plane */
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ return 0;
+free:
+ /* Free already allocated memory if one of the allocations failed */
+ for (; plane > 0; --plane) {
+ call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
+ vb->planes[plane - 1].mem_priv = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * __vb2_buf_mem_free() - free memory of the given buffer
+ */
+static void __vb2_buf_mem_free(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ call_void_memop(vb, put, vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ dprintk(vb->vb2_queue, 3, "freed plane %d of buffer %d\n",
+ plane, vb->index);
+ }
+}
+
+/*
+ * __vb2_buf_userptr_put() - release userspace memory associated with
+ * a USERPTR buffer
+ */
+static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->planes[plane].mem_priv)
+ call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ }
+}
+
+/*
+ * __vb2_plane_dmabuf_put() - release memory associated with
+ * a DMABUF shared plane
+ */
+static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
+{
+ if (!p->mem_priv)
+ return;
+
+ if (p->dbuf_mapped)
+ call_void_memop(vb, unmap_dmabuf, p->mem_priv);
+
+ call_void_memop(vb, detach_dmabuf, p->mem_priv);
+ dma_buf_put(p->dbuf);
+ p->mem_priv = NULL;
+ p->dbuf = NULL;
+ p->dbuf_mapped = 0;
+}
+
+/*
+ * __vb2_buf_dmabuf_put() - release memory associated with
+ * a DMABUF shared buffer
+ */
+static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
+}
+
+/*
+ * __vb2_buf_mem_prepare() - call ->prepare() on buffer's private memory
+ * to sync caches
+ */
+static void __vb2_buf_mem_prepare(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ if (vb->synced)
+ return;
+
+ vb->synced = 1;
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
+}
+
+/*
+ * __vb2_buf_mem_finish() - call ->finish on buffer's private memory
+ * to sync caches
+ */
+static void __vb2_buf_mem_finish(struct vb2_buffer *vb)
+{
+ unsigned int plane;
+
+ if (!vb->synced)
+ return;
+
+ vb->synced = 0;
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ call_void_memop(vb, finish, vb->planes[plane].mem_priv);
+}
+
+/*
+ * __setup_offsets() - setup unique offsets ("cookies") for every plane in
+ * the buffer.
+ */
+static void __setup_offsets(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+ unsigned long off = 0;
+
+ if (vb->index) {
+ struct vb2_buffer *prev = q->bufs[vb->index - 1];
+ struct vb2_plane *p = &prev->planes[prev->num_planes - 1];
+
+ off = PAGE_ALIGN(p->m.offset + p->length);
+ }
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->planes[plane].m.offset = off;
+
+ dprintk(q, 3, "buffer %d, plane %d offset 0x%08lx\n",
+ vb->index, plane, off);
+
+ off += vb->planes[plane].length;
+ off = PAGE_ALIGN(off);
+ }
+}
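
The offsets computed here are opaque cookies rather than real file offsets: vb2 later matches the offset passed to mmap() against these per-plane values to find the right plane. Seen from V4L2 userspace the round trip looks roughly like the sketch below; map_buffer() is a hypothetical helper for the single-planar case.

#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

/* Hypothetical helper: map MMAP buffer @index of a single-planar device. */
static void *map_buffer(int fd, unsigned int index, size_t *len)
{
	struct v4l2_buffer buf;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_MMAP;
	buf.index = index;
	if (ioctl(fd, VIDIOC_QUERYBUF, &buf) < 0)
		return MAP_FAILED;

	*len = buf.length;
	/* buf.m.offset is the per-plane cookie computed by __setup_offsets() */
	return mmap(NULL, buf.length, PROT_READ | PROT_WRITE,
		    MAP_SHARED, fd, buf.m.offset);
}
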
+
+static void init_buffer_cache_hints(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+ /*
+ * DMA exporter should take care of cache syncs, so we can avoid
+ * explicit ->prepare()/->finish() syncs. For other ->memory types
+ * we always need ->prepare() and/or ->finish() cache sync.
+ */
+ if (q->memory == VB2_MEMORY_DMABUF) {
+ vb->skip_cache_sync_on_finish = 1;
+ vb->skip_cache_sync_on_prepare = 1;
+ return;
+ }
+
+ /*
+ * ->finish() cache sync can be avoided when queue direction is
+ * TO_DEVICE.
+ */
+ if (q->dma_dir == DMA_TO_DEVICE)
+ vb->skip_cache_sync_on_finish = 1;
+}
+
+/*
+ * __vb2_queue_alloc() - allocate vb2 buffer structures and (for the MMAP
+ * type) video buffer memory for all buffers/planes on the queue, and
+ * initialize the queue
+ *
+ * Returns the number of buffers successfully allocated.
+ */
+static int __vb2_queue_alloc(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int num_buffers, unsigned int num_planes,
+ const unsigned plane_sizes[VB2_MAX_PLANES])
+{
+ unsigned int buffer, plane;
+ struct vb2_buffer *vb;
+ int ret;
+
+ /* Ensure that q->num_buffers+num_buffers is below VB2_MAX_FRAME */
+ num_buffers = min_t(unsigned int, num_buffers,
+ VB2_MAX_FRAME - q->num_buffers);
+
+ for (buffer = 0; buffer < num_buffers; ++buffer) {
+ /* Allocate vb2 buffer structures */
+ vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
+ if (!vb) {
+ dprintk(q, 1, "memory alloc for buffer struct failed\n");
+ break;
+ }
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+ vb->vb2_queue = q;
+ vb->num_planes = num_planes;
+ vb->index = q->num_buffers + buffer;
+ vb->type = q->type;
+ vb->memory = memory;
+ init_buffer_cache_hints(q, vb);
+ for (plane = 0; plane < num_planes; ++plane) {
+ vb->planes[plane].length = plane_sizes[plane];
+ vb->planes[plane].min_length = plane_sizes[plane];
+ }
+ call_void_bufop(q, init_buffer, vb);
+
+ q->bufs[vb->index] = vb;
+
+ /* Allocate video buffer memory for the MMAP type */
+ if (memory == VB2_MEMORY_MMAP) {
+ ret = __vb2_buf_mem_alloc(vb);
+ if (ret) {
+ dprintk(q, 1, "failed allocating memory for buffer %d\n",
+ buffer);
+ q->bufs[vb->index] = NULL;
+ kfree(vb);
+ break;
+ }
+ __setup_offsets(vb);
+ /*
+ * Call the driver-provided buffer initialization
+ * callback, if given. An error in initialization
+ * results in queue setup failure.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(q, 1, "buffer %d %p initialization failed\n",
+ buffer, vb);
+ __vb2_buf_mem_free(vb);
+ q->bufs[vb->index] = NULL;
+ kfree(vb);
+ break;
+ }
+ }
+ }
+
+ dprintk(q, 3, "allocated %d buffers, %d plane(s) each\n",
+ buffer, num_planes);
+
+ return buffer;
+}
+
+/*
+ * __vb2_free_mem() - release all video buffer memory for a given queue
+ */
+static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
+{
+ unsigned int buffer;
+ struct vb2_buffer *vb;
+
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ /* Free MMAP buffers or release USERPTR buffers */
+ if (q->memory == VB2_MEMORY_MMAP)
+ __vb2_buf_mem_free(vb);
+ else if (q->memory == VB2_MEMORY_DMABUF)
+ __vb2_buf_dmabuf_put(vb);
+ else
+ __vb2_buf_userptr_put(vb);
+ }
+}
+
+/*
+ * __vb2_queue_free() - free buffers at the end of the queue - video memory
+ * and related information; if no buffers are left, return the queue to an
+ * uninitialized state. Might be called even if the queue has already been freed.
+ */
+static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
+{
+ unsigned int buffer;
+
+ /*
+ * Sanity check: when preparing a buffer the queue lock is released for
+ * a short while (see __buf_prepare for the details), which would allow
+ * a race with a reqbufs which can call this function. Removing the
+ * buffers from underneath __buf_prepare is obviously a bad idea, so we
+ * check if any of the buffers is in the state PREPARING, and if so we
+ * just return -EAGAIN.
+ */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ if (q->bufs[buffer] == NULL)
+ continue;
+ if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
+ dprintk(q, 1, "preparing buffers, cannot free\n");
+ return -EAGAIN;
+ }
+ }
+
+ /* Call driver-provided cleanup function for each buffer, if provided */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+
+ if (vb && vb->planes[0].mem_priv)
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
+
+ /* Release video buffer memory */
+ __vb2_free_mem(q, buffers);
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Check that all the calls were balanced during the lifetime of this
+ * queue. If not (or if the debug level is 1 or up), then dump the
+ * counters to the kernel log.
+ */
+ if (q->num_buffers) {
+ bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
+ q->cnt_wait_prepare != q->cnt_wait_finish;
+
+ if (unbalanced || debug) {
+ pr_info("counters for queue %p:%s\n", q,
+ unbalanced ? " UNBALANCED!" : "");
+ pr_info(" setup: %u start_streaming: %u stop_streaming: %u\n",
+ q->cnt_queue_setup, q->cnt_start_streaming,
+ q->cnt_stop_streaming);
+ pr_info(" wait_prepare: %u wait_finish: %u\n",
+ q->cnt_wait_prepare, q->cnt_wait_finish);
+ }
+ q->cnt_queue_setup = 0;
+ q->cnt_wait_prepare = 0;
+ q->cnt_wait_finish = 0;
+ q->cnt_start_streaming = 0;
+ q->cnt_stop_streaming = 0;
+ }
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ struct vb2_buffer *vb = q->bufs[buffer];
+ bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
+ vb->cnt_mem_prepare != vb->cnt_mem_finish ||
+ vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
+ vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
+ vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
+ vb->cnt_buf_queue != vb->cnt_buf_done ||
+ vb->cnt_buf_prepare != vb->cnt_buf_finish ||
+ vb->cnt_buf_init != vb->cnt_buf_cleanup;
+
+ if (unbalanced || debug) {
+ pr_info(" counters for queue %p, buffer %d:%s\n",
+ q, buffer, unbalanced ? " UNBALANCED!" : "");
+ pr_info(" buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
+ vb->cnt_buf_init, vb->cnt_buf_cleanup,
+ vb->cnt_buf_prepare, vb->cnt_buf_finish);
+ pr_info(" buf_out_validate: %u buf_queue: %u buf_done: %u buf_request_complete: %u\n",
+ vb->cnt_buf_out_validate, vb->cnt_buf_queue,
+ vb->cnt_buf_done, vb->cnt_buf_request_complete);
+ pr_info(" alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
+ vb->cnt_mem_alloc, vb->cnt_mem_put,
+ vb->cnt_mem_prepare, vb->cnt_mem_finish,
+ vb->cnt_mem_mmap);
+ pr_info(" get_userptr: %u put_userptr: %u\n",
+ vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
+ pr_info(" attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
+ vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
+ vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
+ pr_info(" get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
+ vb->cnt_mem_get_dmabuf,
+ vb->cnt_mem_num_users,
+ vb->cnt_mem_vaddr,
+ vb->cnt_mem_cookie);
+ }
+ }
+#endif
+
+ /* Free vb2 buffers */
+ for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
+ ++buffer) {
+ kfree(q->bufs[buffer]);
+ q->bufs[buffer] = NULL;
+ }
+
+ q->num_buffers -= buffers;
+ if (!q->num_buffers) {
+ q->memory = VB2_MEMORY_UNKNOWN;
+ INIT_LIST_HEAD(&q->queued_list);
+ }
+ return 0;
+}
+
+bool vb2_buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
+{
+ unsigned int plane;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ void *mem_priv = vb->planes[plane].mem_priv;
+ /*
+ * If num_users() has not been provided, call_memop
+ * will return 0; apparently nobody cares about this
+ * case anyway. If num_users() returns more than 1,
+ * we are not the only user of the plane's memory.
+ */
+ if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
+ return true;
+ }
+ return false;
+}
+EXPORT_SYMBOL(vb2_buffer_in_use);
+
+/*
+ * __buffers_in_use() - return true if any buffers on the queue are in use and
+ * the queue cannot be freed (by means of a REQBUFS(0) call)
+ */
+static bool __buffers_in_use(struct vb2_queue *q)
+{
+ unsigned int buffer;
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ if (vb2_buffer_in_use(q, q->bufs[buffer]))
+ return true;
+ }
+ return false;
+}
+
+void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb)
+{
+ call_void_bufop(q, fill_user_buffer, q->bufs[index], pb);
+}
+EXPORT_SYMBOL_GPL(vb2_core_querybuf);
+
+/*
+ * __verify_userptr_ops() - verify that all memory operations required for
+ * USERPTR queue type have been provided
+ */
+static int __verify_userptr_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
+ !q->mem_ops->put_userptr)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * __verify_mmap_ops() - verify that all memory operations required for
+ * MMAP queue type have been provided
+ */
+static int __verify_mmap_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
+ !q->mem_ops->put || !q->mem_ops->mmap)
+ return -EINVAL;
+
+ return 0;
+}
+
+/*
+ * __verify_dmabuf_ops() - verify that all memory operations required for
+ * DMABUF queue type have been provided
+ */
+static int __verify_dmabuf_ops(struct vb2_queue *q)
+{
+ if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
+ !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
+ !q->mem_ops->unmap_dmabuf)
+ return -EINVAL;
+
+ return 0;
+}
+
+int vb2_verify_memory_type(struct vb2_queue *q,
+ enum vb2_memory memory, unsigned int type)
+{
+ if (memory != VB2_MEMORY_MMAP && memory != VB2_MEMORY_USERPTR &&
+ memory != VB2_MEMORY_DMABUF) {
+ dprintk(q, 1, "unsupported memory type\n");
+ return -EINVAL;
+ }
+
+ if (type != q->type) {
+ dprintk(q, 1, "requested type is incorrect\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Make sure all the required memory ops for given memory type
+ * are available.
+ */
+ if (memory == VB2_MEMORY_MMAP && __verify_mmap_ops(q)) {
+ dprintk(q, 1, "MMAP for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ if (memory == VB2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
+ dprintk(q, 1, "USERPTR for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ if (memory == VB2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
+ dprintk(q, 1, "DMABUF for current setup unsupported\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Place the busy tests at the end: -EBUSY can be ignored when
+ * create_bufs is called with count == 0, but count == 0 should still
+ * do the memory and type validation.
+ */
+ if (vb2_fileio_is_active(q)) {
+ dprintk(q, 1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return 0;
+}
+EXPORT_SYMBOL(vb2_verify_memory_type);
+
+static void set_queue_coherency(struct vb2_queue *q, bool non_coherent_mem)
+{
+ q->non_coherent_mem = 0;
+
+ if (!vb2_queue_allows_cache_hints(q))
+ return;
+ q->non_coherent_mem = non_coherent_mem;
+}
+
+static bool verify_coherency_flags(struct vb2_queue *q, bool non_coherent_mem)
+{
+ if (non_coherent_mem != q->non_coherent_mem) {
+ dprintk(q, 1, "memory coherency model mismatch\n");
+ return false;
+ }
+ return true;
+}
+
+int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int flags, unsigned int *count)
+{
+ unsigned int num_buffers, allocated_buffers, num_planes = 0;
+ unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
+ unsigned int i;
+ int ret;
+
+ if (q->streaming) {
+ dprintk(q, 1, "streaming active\n");
+ return -EBUSY;
+ }
+
+ if (q->waiting_in_dqbuf && *count) {
+ dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
+ return -EBUSY;
+ }
+
+ if (*count == 0 || q->num_buffers != 0 ||
+ (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) ||
+ !verify_coherency_flags(q, non_coherent_mem)) {
+ /*
+ * We already have buffers allocated, so first check if they
+ * are not in use and can be freed.
+ */
+ mutex_lock(&q->mmap_lock);
+ if (debug && q->memory == VB2_MEMORY_MMAP &&
+ __buffers_in_use(q))
+ dprintk(q, 1, "memory in use, orphaning buffers\n");
+
+ /*
+ * Call __vb2_queue_cancel() to clean up any buffers in the
+ * QUEUED state, which is possible if buffers were prepared or
+ * queued without STREAMON ever being called.
+ */
+ __vb2_queue_cancel(q);
+ ret = __vb2_queue_free(q, q->num_buffers);
+ mutex_unlock(&q->mmap_lock);
+ if (ret)
+ return ret;
+
+ /*
+ * In case of REQBUFS(0) return immediately without calling
+ * driver's queue_setup() callback and allocating resources.
+ */
+ if (*count == 0)
+ return 0;
+ }
+
+ /*
+ * Make sure the requested values and current defaults are sane.
+ */
+ WARN_ON(q->min_buffers_needed > VB2_MAX_FRAME);
+ num_buffers = max_t(unsigned int, *count, q->min_buffers_needed);
+ num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME);
+ memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+ /*
+ * Set this now to ensure that drivers see the correct q->memory value
+ * in the queue_setup op.
+ */
+ mutex_lock(&q->mmap_lock);
+ q->memory = memory;
+ mutex_unlock(&q->mmap_lock);
+ set_queue_coherency(q, non_coherent_mem);
+
+ /*
+ * Ask the driver how many buffers and planes per buffer it requires.
+ * Driver also sets the size and allocator context for each plane.
+ */
+ ret = call_qop(q, queue_setup, q, &num_buffers, &num_planes,
+ plane_sizes, q->alloc_devs);
+ if (ret)
+ goto error;
+
+ /* Check that driver has set sane values */
+ if (WARN_ON(!num_planes)) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ for (i = 0; i < num_planes; i++)
+ if (WARN_ON(!plane_sizes[i])) {
+ ret = -EINVAL;
+ goto error;
+ }
+
+ /* Finally, allocate buffers and video memory */
+ allocated_buffers =
+ __vb2_queue_alloc(q, memory, num_buffers, num_planes, plane_sizes);
+ if (allocated_buffers == 0) {
+ dprintk(q, 1, "memory allocation failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * There is no point in continuing if we can't allocate the minimum
+ * number of buffers needed by this vb2_queue.
+ */
+ if (allocated_buffers < q->min_buffers_needed)
+ ret = -ENOMEM;
+
+ /*
+ * Check if driver can handle the allocated number of buffers.
+ */
+ if (!ret && allocated_buffers < num_buffers) {
+ num_buffers = allocated_buffers;
+ /*
+ * num_planes is set by the previous queue_setup(), but since it
+ * signals to queue_setup() whether it is called from create_bufs()
+ * vs reqbufs() we zero it here to signal that queue_setup() is
+ * called for the reqbufs() case.
+ */
+ num_planes = 0;
+
+ ret = call_qop(q, queue_setup, q, &num_buffers,
+ &num_planes, plane_sizes, q->alloc_devs);
+
+ if (!ret && allocated_buffers < num_buffers)
+ ret = -ENOMEM;
+
+ /*
+ * Either the driver has accepted a smaller number of buffers,
+ * or .queue_setup() returned an error
+ */
+ }
+
+ mutex_lock(&q->mmap_lock);
+ q->num_buffers = allocated_buffers;
+
+ if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers and it will reset q->memory to
+ * VB2_MEMORY_UNKNOWN.
+ */
+ __vb2_queue_free(q, allocated_buffers);
+ mutex_unlock(&q->mmap_lock);
+ return ret;
+ }
+ mutex_unlock(&q->mmap_lock);
+
+ /*
+ * Return the number of successfully allocated buffers
+ * to the userspace.
+ */
+ *count = allocated_buffers;
+ q->waiting_for_buffers = !q->is_output;
+
+ return 0;
+
+error:
+ mutex_lock(&q->mmap_lock);
+ q->memory = VB2_MEMORY_UNKNOWN;
+ mutex_unlock(&q->mmap_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_core_reqbufs);
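
The negotiation described in the comments above is driven through the driver's queue_setup op, which is called with *num_planes == 0 for the reqbufs() path and with a pre-filled layout for the create_bufs() path. A hypothetical driver-side implementation might look like this (EXAMPLE_IMAGE_SIZE and the minimum of three buffers are made-up values):

#include <linux/errno.h>
#include <media/videobuf2-core.h>

#define EXAMPLE_IMAGE_SIZE (1920 * 1080 * 2)	/* made-up frame size */

static int example_queue_setup(struct vb2_queue *q,
			       unsigned int *num_buffers,
			       unsigned int *num_planes,
			       unsigned int sizes[],
			       struct device *alloc_devs[])
{
	/* create_bufs() path: a layout was requested, only validate it */
	if (*num_planes)
		return sizes[0] < EXAMPLE_IMAGE_SIZE ? -EINVAL : 0;

	/* reqbufs() path: report the layout and clamp the buffer count */
	if (*num_buffers < 3)
		*num_buffers = 3;
	*num_planes = 1;
	sizes[0] = EXAMPLE_IMAGE_SIZE;
	return 0;
}
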
+
+int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
+ unsigned int flags, unsigned int *count,
+ unsigned int requested_planes,
+ const unsigned int requested_sizes[])
+{
+ unsigned int num_planes = 0, num_buffers, allocated_buffers;
+ unsigned plane_sizes[VB2_MAX_PLANES] = { };
+ bool non_coherent_mem = flags & V4L2_MEMORY_FLAG_NON_COHERENT;
+ bool no_previous_buffers = !q->num_buffers;
+ int ret;
+
+ if (q->num_buffers == VB2_MAX_FRAME) {
+ dprintk(q, 1, "maximum number of buffers already allocated\n");
+ return -ENOBUFS;
+ }
+
+ if (no_previous_buffers) {
+ if (q->waiting_in_dqbuf && *count) {
+ dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
+ return -EBUSY;
+ }
+ memset(q->alloc_devs, 0, sizeof(q->alloc_devs));
+ /*
+ * Set this now to ensure that drivers see the correct q->memory
+ * value in the queue_setup op.
+ */
+ mutex_lock(&q->mmap_lock);
+ q->memory = memory;
+ mutex_unlock(&q->mmap_lock);
+ q->waiting_for_buffers = !q->is_output;
+ set_queue_coherency(q, non_coherent_mem);
+ } else {
+ if (q->memory != memory) {
+ dprintk(q, 1, "memory model mismatch\n");
+ return -EINVAL;
+ }
+ if (!verify_coherency_flags(q, non_coherent_mem))
+ return -EINVAL;
+ }
+
+ num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers);
+
+ if (requested_planes && requested_sizes) {
+ num_planes = requested_planes;
+ memcpy(plane_sizes, requested_sizes, sizeof(plane_sizes));
+ }
+
+ /*
+ * Ask the driver whether the requested number of buffers, planes per
+ * buffer and their sizes are acceptable.
+ */
+ ret = call_qop(q, queue_setup, q, &num_buffers,
+ &num_planes, plane_sizes, q->alloc_devs);
+ if (ret)
+ goto error;
+
+ /* Finally, allocate buffers and video memory */
+ allocated_buffers = __vb2_queue_alloc(q, memory, num_buffers,
+ num_planes, plane_sizes);
+ if (allocated_buffers == 0) {
+ dprintk(q, 1, "memory allocation failed\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ /*
+ * Check if driver can handle the so far allocated number of buffers.
+ */
+ if (allocated_buffers < num_buffers) {
+ num_buffers = allocated_buffers;
+
+ /*
+ * q->num_buffers contains the total number of buffers that the
+ * queue driver has set up.
+ */
+ ret = call_qop(q, queue_setup, q, &num_buffers,
+ &num_planes, plane_sizes, q->alloc_devs);
+
+ if (!ret && allocated_buffers < num_buffers)
+ ret = -ENOMEM;
+
+ /*
+ * Either the driver has accepted a smaller number of buffers,
+ * or .queue_setup() returned an error
+ */
+ }
+
+ mutex_lock(&q->mmap_lock);
+ q->num_buffers += allocated_buffers;
+
+ if (ret < 0) {
+ /*
+ * Note: __vb2_queue_free() will subtract 'allocated_buffers'
+ * from q->num_buffers and it will reset q->memory to
+ * VB2_MEMORY_UNKNOWN.
+ */
+ __vb2_queue_free(q, allocated_buffers);
+ mutex_unlock(&q->mmap_lock);
+ return -ENOMEM;
+ }
+ mutex_unlock(&q->mmap_lock);
+
+ /*
+ * Return the number of successfully allocated buffers
+ * to the userspace.
+ */
+ *count = allocated_buffers;
+
+ return 0;
+
+error:
+ if (no_previous_buffers) {
+ mutex_lock(&q->mmap_lock);
+ q->memory = VB2_MEMORY_UNKNOWN;
+ mutex_unlock(&q->mmap_lock);
+ }
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_core_create_bufs);
+
+void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
+{
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
+ return NULL;
+
+ return call_ptr_memop(vaddr, vb, vb->planes[plane_no].mem_priv);
+
+}
+EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
+
+void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
+{
+ if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
+ return NULL;
+
+ return call_ptr_memop(cookie, vb, vb->planes[plane_no].mem_priv);
+}
+EXPORT_SYMBOL_GPL(vb2_plane_cookie);
+
+void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned long flags;
+
+ if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
+ return;
+
+ if (WARN_ON(state != VB2_BUF_STATE_DONE &&
+ state != VB2_BUF_STATE_ERROR &&
+ state != VB2_BUF_STATE_QUEUED))
+ state = VB2_BUF_STATE_ERROR;
+
+#ifdef CONFIG_VIDEO_ADV_DEBUG
+ /*
+ * Although this is not a callback, it still has to balance
+ * with the buf_queue op. So update this counter manually.
+ */
+ vb->cnt_buf_done++;
+#endif
+ dprintk(q, 4, "done processing on buffer %d, state: %s\n",
+ vb->index, vb2_state_name(state));
+
+ if (state != VB2_BUF_STATE_QUEUED)
+ __vb2_buf_mem_finish(vb);
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ if (state == VB2_BUF_STATE_QUEUED) {
+ vb->state = VB2_BUF_STATE_QUEUED;
+ } else {
+ /* Add the buffer to the done buffers list */
+ list_add_tail(&vb->done_entry, &q->done_list);
+ vb->state = state;
+ }
+ atomic_dec(&q->owned_by_drv_count);
+
+ if (state != VB2_BUF_STATE_QUEUED && vb->req_obj.req) {
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ trace_vb2_buf_done(q, vb);
+
+ switch (state) {
+ case VB2_BUF_STATE_QUEUED:
+ return;
+ default:
+ /* Inform any processes that may be waiting for buffers */
+ wake_up(&q->done_wq);
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(vb2_buffer_done);
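
vb2_buffer_done() is the driver's half of the buffer life cycle: once DMA completes, the driver fills in the timestamp and payload and hands the buffer back in DONE (or ERROR) state, which moves it to done_list and wakes any dequeuer. A hypothetical completion handler (the example_dev structure is made up) could look like:

#include <linux/ktime.h>
#include <media/videobuf2-core.h>

/* Made-up per-device state for the sketch. */
struct example_dev {
	struct vb2_buffer *current_buf;	/* buffer the DMA engine just filled */
	unsigned long frame_size;	/* bytes written by the hardware */
	bool dma_error;			/* set from the IRQ status register */
};

static void example_dma_done_irq(struct example_dev *dev)
{
	struct vb2_buffer *vb = dev->current_buf;

	vb->timestamp = ktime_get_ns();
	vb2_set_plane_payload(vb, 0, dev->frame_size);
	vb2_buffer_done(vb, dev->dma_error ? VB2_BUF_STATE_ERROR
					   : VB2_BUF_STATE_DONE);
}
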
+
+void vb2_discard_done(struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ unsigned long flags;
+
+ spin_lock_irqsave(&q->done_lock, flags);
+ list_for_each_entry(vb, &q->done_list, done_entry)
+ vb->state = VB2_BUF_STATE_ERROR;
+ spin_unlock_irqrestore(&q->done_lock, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_discard_done);
+
+/*
+ * __prepare_mmap() - prepare an MMAP buffer
+ */
+static int __prepare_mmap(struct vb2_buffer *vb)
+{
+ int ret = 0;
+
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, vb->planes);
+ return ret ? ret : call_vb_qop(vb, buf_prepare, vb);
+}
+
+/*
+ * __prepare_userptr() - prepare a USERPTR buffer
+ */
+static int __prepare_userptr(struct vb2_buffer *vb)
+{
+ struct vb2_plane planes[VB2_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret = 0;
+ bool reacquired = vb->planes[0].mem_priv == NULL;
+
+ memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
+ /* Copy relevant information provided by the userspace */
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, planes);
+ if (ret)
+ return ret;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ /* Skip the plane if already verified */
+ if (vb->planes[plane].m.userptr &&
+ vb->planes[plane].m.userptr == planes[plane].m.userptr
+ && vb->planes[plane].length == planes[plane].length)
+ continue;
+
+ dprintk(q, 3, "userspace address for plane %d changed, reacquiring memory\n",
+ plane);
+
+ /* Check if the provided plane buffer is large enough */
+ if (planes[plane].length < vb->planes[plane].min_length) {
+ dprintk(q, 1, "provided buffer size %u is less than setup size %u for plane %d\n",
+ planes[plane].length,
+ vb->planes[plane].min_length,
+ plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Release previously acquired memory if present */
+ if (vb->planes[plane].mem_priv) {
+ if (!reacquired) {
+ reacquired = true;
+ vb->copied_timestamp = 0;
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
+ call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
+ }
+
+ vb->planes[plane].mem_priv = NULL;
+ vb->planes[plane].bytesused = 0;
+ vb->planes[plane].length = 0;
+ vb->planes[plane].m.userptr = 0;
+ vb->planes[plane].data_offset = 0;
+
+ /* Acquire each plane's memory */
+ mem_priv = call_ptr_memop(get_userptr,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ planes[plane].m.userptr,
+ planes[plane].length);
+ if (IS_ERR(mem_priv)) {
+ dprintk(q, 1, "failed acquiring userspace memory for plane %d\n",
+ plane);
+ ret = PTR_ERR(mem_priv);
+ goto err;
+ }
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->planes[plane].bytesused = planes[plane].bytesused;
+ vb->planes[plane].length = planes[plane].length;
+ vb->planes[plane].m.userptr = planes[plane].m.userptr;
+ vb->planes[plane].data_offset = planes[plane].data_offset;
+ }
+
+ if (reacquired) {
+ /*
+ * One or more planes changed, so we must call buf_init to do
+ * the driver-specific initialization on the newly acquired
+ * buffer, if provided.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(q, 1, "buffer initialization failed\n");
+ goto err;
+ }
+ }
+
+ ret = call_vb_qop(vb, buf_prepare, vb);
+ if (ret) {
+ dprintk(q, 1, "buffer preparation failed\n");
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ goto err;
+ }
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->planes[plane].mem_priv)
+ call_void_memop(vb, put_userptr,
+ vb->planes[plane].mem_priv);
+ vb->planes[plane].mem_priv = NULL;
+ vb->planes[plane].m.userptr = 0;
+ vb->planes[plane].length = 0;
+ }
+
+ return ret;
+}
+
+/*
+ * __prepare_dmabuf() - prepare a DMABUF buffer
+ */
+static int __prepare_dmabuf(struct vb2_buffer *vb)
+{
+ struct vb2_plane planes[VB2_MAX_PLANES];
+ struct vb2_queue *q = vb->vb2_queue;
+ void *mem_priv;
+ unsigned int plane;
+ int ret = 0;
+ bool reacquired = vb->planes[0].mem_priv == NULL;
+
+ memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
+ /* Copy relevant information provided by the userspace */
+ ret = call_bufop(vb->vb2_queue, fill_vb2_buffer,
+ vb, planes);
+ if (ret)
+ return ret;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
+
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(q, 1, "invalid dmabuf fd for plane %d\n",
+ plane);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* use DMABUF size if length is not provided */
+ if (planes[plane].length == 0)
+ planes[plane].length = dbuf->size;
+
+ if (planes[plane].length < vb->planes[plane].min_length) {
+ dprintk(q, 1, "invalid dmabuf length %u for plane %d, minimum length %u\n",
+ planes[plane].length, plane,
+ vb->planes[plane].min_length);
+ dma_buf_put(dbuf);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Skip the plane if already verified */
+ if (dbuf == vb->planes[plane].dbuf &&
+ vb->planes[plane].length == planes[plane].length) {
+ dma_buf_put(dbuf);
+ continue;
+ }
+
+ dprintk(q, 3, "buffer for plane %d changed\n", plane);
+
+ if (!reacquired) {
+ reacquired = true;
+ vb->copied_timestamp = 0;
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ }
+
+ /* Release previously acquired memory if present */
+ __vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
+ vb->planes[plane].bytesused = 0;
+ vb->planes[plane].length = 0;
+ vb->planes[plane].m.fd = 0;
+ vb->planes[plane].data_offset = 0;
+
+ /* Acquire each plane's memory */
+ mem_priv = call_ptr_memop(attach_dmabuf,
+ vb,
+ q->alloc_devs[plane] ? : q->dev,
+ dbuf,
+ planes[plane].length);
+ if (IS_ERR(mem_priv)) {
+ dprintk(q, 1, "failed to attach dmabuf\n");
+ ret = PTR_ERR(mem_priv);
+ dma_buf_put(dbuf);
+ goto err;
+ }
+
+ vb->planes[plane].dbuf = dbuf;
+ vb->planes[plane].mem_priv = mem_priv;
+ }
+
+ /*
+ * This pins the buffer(s) with dma_buf_map_attachment(). It's done
+ * here, while queueing the buffer(s), instead of just before the DMA
+ * so userspace knows sooner rather than later if the dma-buf map fails.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->planes[plane].dbuf_mapped)
+ continue;
+
+ ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
+ if (ret) {
+ dprintk(q, 1, "failed to map dmabuf for plane %d\n",
+ plane);
+ goto err;
+ }
+ vb->planes[plane].dbuf_mapped = 1;
+ }
+
+ /*
+ * Now that everything is in order, copy relevant information
+ * provided by userspace.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ vb->planes[plane].bytesused = planes[plane].bytesused;
+ vb->planes[plane].length = planes[plane].length;
+ vb->planes[plane].m.fd = planes[plane].m.fd;
+ vb->planes[plane].data_offset = planes[plane].data_offset;
+ }
+
+ if (reacquired) {
+ /*
+ * Call driver-specific initialization on the newly acquired buffer,
+ * if provided.
+ */
+ ret = call_vb_qop(vb, buf_init, vb);
+ if (ret) {
+ dprintk(q, 1, "buffer initialization failed\n");
+ goto err;
+ }
+ }
+
+ ret = call_vb_qop(vb, buf_prepare, vb);
+ if (ret) {
+ dprintk(q, 1, "buffer preparation failed\n");
+ call_void_vb_qop(vb, buf_cleanup, vb);
+ goto err;
+ }
+
+ return 0;
+err:
+ /* In case of errors, release planes that were already acquired */
+ __vb2_buf_dmabuf_put(vb);
+
+ return ret;
+}
+
+/*
+ * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
+ */
+static void __enqueue_in_driver(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+
+ vb->state = VB2_BUF_STATE_ACTIVE;
+ atomic_inc(&q->owned_by_drv_count);
+
+ trace_vb2_buf_queue(q, vb);
+
+ call_void_vb_qop(vb, buf_queue, vb);
+}
+
+static int __buf_prepare(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ enum vb2_buffer_state orig_state = vb->state;
+ int ret;
+
+ if (q->error) {
+ dprintk(q, 1, "fatal error occurred on queue\n");
+ return -EIO;
+ }
+
+ if (vb->prepared)
+ return 0;
+ WARN_ON(vb->synced);
+
+ if (q->is_output) {
+ ret = call_vb_qop(vb, buf_out_validate, vb);
+ if (ret) {
+ dprintk(q, 1, "buffer validation failed\n");
+ return ret;
+ }
+ }
+
+ vb->state = VB2_BUF_STATE_PREPARING;
+
+ switch (q->memory) {
+ case VB2_MEMORY_MMAP:
+ ret = __prepare_mmap(vb);
+ break;
+ case VB2_MEMORY_USERPTR:
+ ret = __prepare_userptr(vb);
+ break;
+ case VB2_MEMORY_DMABUF:
+ ret = __prepare_dmabuf(vb);
+ break;
+ default:
+ WARN(1, "Invalid queue type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ if (ret) {
+ dprintk(q, 1, "buffer preparation failed: %d\n", ret);
+ vb->state = orig_state;
+ return ret;
+ }
+
+ __vb2_buf_mem_prepare(vb);
+ vb->prepared = 1;
+ vb->state = orig_state;
+
+ return 0;
+}
+
+static int vb2_req_prepare(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+ int ret;
+
+ if (WARN_ON(vb->state != VB2_BUF_STATE_IN_REQUEST))
+ return -EINVAL;
+
+ mutex_lock(vb->vb2_queue->lock);
+ ret = __buf_prepare(vb);
+ mutex_unlock(vb->vb2_queue->lock);
+ return ret;
+}
+
+static void __vb2_dqbuf(struct vb2_buffer *vb);
+
+static void vb2_req_unprepare(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ mutex_lock(vb->vb2_queue->lock);
+ __vb2_dqbuf(vb);
+ vb->state = VB2_BUF_STATE_IN_REQUEST;
+ mutex_unlock(vb->vb2_queue->lock);
+ WARN_ON(!vb->req_obj.req);
+}
+
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+ struct media_request *req);
+
+static void vb2_req_queue(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+ int err;
+
+ mutex_lock(vb->vb2_queue->lock);
+ /*
+ * There is no method to propagate an error from vb2_core_qbuf(),
+ * so if this returns a non-0 value, then WARN.
+ *
+ * The only exception is -EIO which is returned if q->error is
+ * set. We just ignore that, and expect this will be caught the
+ * next time vb2_req_prepare() is called.
+ */
+ err = vb2_core_qbuf(vb->vb2_queue, vb->index, NULL, NULL);
+ WARN_ON_ONCE(err && err != -EIO);
+ mutex_unlock(vb->vb2_queue->lock);
+}
+
+static void vb2_req_unbind(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ if (vb->state == VB2_BUF_STATE_IN_REQUEST)
+ call_void_bufop(vb->vb2_queue, init_buffer, vb);
+}
+
+static void vb2_req_release(struct media_request_object *obj)
+{
+ struct vb2_buffer *vb = container_of(obj, struct vb2_buffer, req_obj);
+
+ if (vb->state == VB2_BUF_STATE_IN_REQUEST) {
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+ if (vb->request)
+ media_request_put(vb->request);
+ vb->request = NULL;
+ }
+}
+
+static const struct media_request_object_ops vb2_core_req_ops = {
+ .prepare = vb2_req_prepare,
+ .unprepare = vb2_req_unprepare,
+ .queue = vb2_req_queue,
+ .unbind = vb2_req_unbind,
+ .release = vb2_req_release,
+};
+
+bool vb2_request_object_is_buffer(struct media_request_object *obj)
+{
+ return obj->ops == &vb2_core_req_ops;
+}
+EXPORT_SYMBOL_GPL(vb2_request_object_is_buffer);
+
+unsigned int vb2_request_buffer_cnt(struct media_request *req)
+{
+ struct media_request_object *obj;
+ unsigned long flags;
+ unsigned int buffer_cnt = 0;
+
+ spin_lock_irqsave(&req->lock, flags);
+ list_for_each_entry(obj, &req->objects, list)
+ if (vb2_request_object_is_buffer(obj))
+ buffer_cnt++;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ return buffer_cnt;
+}
+EXPORT_SYMBOL_GPL(vb2_request_buffer_cnt);
+
+int vb2_core_prepare_buf(struct vb2_queue *q, unsigned int index, void *pb)
+{
+ struct vb2_buffer *vb;
+ int ret;
+
+ vb = q->bufs[index];
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(q, 1, "invalid buffer state %s\n",
+ vb2_state_name(vb->state));
+ return -EINVAL;
+ }
+ if (vb->prepared) {
+ dprintk(q, 1, "buffer already prepared\n");
+ return -EINVAL;
+ }
+
+ ret = __buf_prepare(vb);
+ if (ret)
+ return ret;
+
+ /* Fill buffer information for the userspace */
+ call_void_bufop(q, fill_user_buffer, vb, pb);
+
+ dprintk(q, 2, "prepare of buffer %d succeeded\n", vb->index);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_prepare_buf);
+
+/*
+ * vb2_start_streaming() - Attempt to start streaming.
+ * @q: videobuf2 queue
+ *
+ * Attempt to start streaming. When this function is called there must be
+ * at least q->min_buffers_needed buffers queued up (i.e. the minimum
+ * number of buffers required for the DMA engine to function). If the
+ * @start_streaming op fails it is supposed to return all the driver-owned
+ * buffers back to vb2 in state QUEUED. Check if that happened and if
+ * not warn and reclaim them forcefully.
+ */
+static int vb2_start_streaming(struct vb2_queue *q)
+{
+ struct vb2_buffer *vb;
+ int ret;
+
+ /*
+ * If any buffers were queued before streamon,
+ * we can now pass them to the driver for processing.
+ */
+ list_for_each_entry(vb, &q->queued_list, queued_entry)
+ __enqueue_in_driver(vb);
+
+ /* Tell the driver to start streaming */
+ q->start_streaming_called = 1;
+ ret = call_qop(q, start_streaming, q,
+ atomic_read(&q->owned_by_drv_count));
+ if (!ret)
+ return 0;
+
+ q->start_streaming_called = 0;
+
+ dprintk(q, 1, "driver refused to start streaming\n");
+ /*
+ * If you see this warning, then the driver isn't cleaning up properly
+ * after a failed start_streaming(). See the start_streaming()
+ * documentation in videobuf2-core.h for more information on how buffers
+ * should be returned to vb2 in start_streaming().
+ */
+ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+ unsigned i;
+
+ /*
+ * Forcefully reclaim buffers if the driver did not
+ * correctly return them to vb2.
+ */
+ for (i = 0; i < q->num_buffers; ++i) {
+ vb = q->bufs[i];
+ if (vb->state == VB2_BUF_STATE_ACTIVE)
+ vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
+ }
+ /* Must be zero now */
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
+ }
+ /*
+ * If done_list is not empty, then start_streaming() didn't call
+ * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
+ * STATE_DONE.
+ */
+ WARN_ON(!list_empty(&q->done_list));
+ return ret;
+}
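
The WARN_ON above enforces a contract on the driver's start_streaming op: if it fails, every buffer the driver was given must already have been handed back via vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED). A hypothetical driver honoring that contract (example_dev, example_buf and example_hw_start() are made up):

#include <linux/list.h>
#include <media/videobuf2-core.h>

/* Made-up buffer wrapper and device state for the sketch. */
struct example_buf {
	struct vb2_buffer vb;
	struct list_head list;
};

struct example_dev {
	struct list_head buf_list;	/* buffers handed over by buf_queue */
};

static int example_hw_start(struct example_dev *dev);	/* made-up helper */

static int example_start_streaming(struct vb2_queue *q, unsigned int count)
{
	struct example_dev *dev = vb2_get_drv_priv(q);
	struct example_buf *buf, *tmp;
	int ret;

	ret = example_hw_start(dev);
	if (ret) {
		/* Give every driver-owned buffer back in QUEUED state. */
		list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
			list_del(&buf->list);
			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
		}
	}
	return ret;
}
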
+
+int vb2_core_qbuf(struct vb2_queue *q, unsigned int index, void *pb,
+ struct media_request *req)
+{
+ struct vb2_buffer *vb;
+ enum vb2_buffer_state orig_state;
+ int ret;
+
+ if (q->error) {
+ dprintk(q, 1, "fatal error occurred on queue\n");
+ return -EIO;
+ }
+
+ vb = q->bufs[index];
+
+ if (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
+ q->requires_requests) {
+ dprintk(q, 1, "qbuf requires a request\n");
+ return -EBADR;
+ }
+
+ if ((req && q->uses_qbuf) ||
+ (!req && vb->state != VB2_BUF_STATE_IN_REQUEST &&
+ q->uses_requests)) {
+ dprintk(q, 1, "queue in wrong mode (qbuf vs requests)\n");
+ return -EBUSY;
+ }
+
+ if (req) {
+ int ret;
+
+ q->uses_requests = 1;
+ if (vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(q, 1, "buffer %d not in dequeued state\n",
+ vb->index);
+ return -EINVAL;
+ }
+
+ if (q->is_output && !vb->prepared) {
+ ret = call_vb_qop(vb, buf_out_validate, vb);
+ if (ret) {
+ dprintk(q, 1, "buffer validation failed\n");
+ return ret;
+ }
+ }
+
+ media_request_object_init(&vb->req_obj);
+
+ /* Make sure the request is in a safe state for updating. */
+ ret = media_request_lock_for_update(req);
+ if (ret)
+ return ret;
+ ret = media_request_object_bind(req, &vb2_core_req_ops,
+ q, true, &vb->req_obj);
+ media_request_unlock_for_update(req);
+ if (ret)
+ return ret;
+
+ vb->state = VB2_BUF_STATE_IN_REQUEST;
+
+ /*
+ * Increment the refcount and store the request.
+ * The request refcount is decremented again when the
+ * buffer is dequeued. This is to prevent vb2_buffer_done()
+ * from freeing the request from interrupt context, which can
+ * happen if the application closed the request fd after
+ * queueing the request.
+ */
+ media_request_get(req);
+ vb->request = req;
+
+ /* Fill buffer information for the userspace */
+ if (pb) {
+ call_void_bufop(q, copy_timestamp, vb, pb);
+ call_void_bufop(q, fill_user_buffer, vb, pb);
+ }
+
+ dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
+ return 0;
+ }
+
+ if (vb->state != VB2_BUF_STATE_IN_REQUEST)
+ q->uses_qbuf = 1;
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_DEQUEUED:
+ case VB2_BUF_STATE_IN_REQUEST:
+ if (!vb->prepared) {
+ ret = __buf_prepare(vb);
+ if (ret)
+ return ret;
+ }
+ break;
+ case VB2_BUF_STATE_PREPARING:
+ dprintk(q, 1, "buffer still being prepared\n");
+ return -EINVAL;
+ default:
+ dprintk(q, 1, "invalid buffer state %s\n",
+ vb2_state_name(vb->state));
+ return -EINVAL;
+ }
+
+ /*
+ * Add to the queued buffers list; a buffer will stay on it until
+ * dequeued in dqbuf.
+ */
+ orig_state = vb->state;
+ list_add_tail(&vb->queued_entry, &q->queued_list);
+ q->queued_count++;
+ q->waiting_for_buffers = false;
+ vb->state = VB2_BUF_STATE_QUEUED;
+
+ if (pb)
+ call_void_bufop(q, copy_timestamp, vb, pb);
+
+ trace_vb2_qbuf(q, vb);
+
+ /*
+ * If already streaming, give the buffer to driver for processing.
+ * If not, the buffer will be given to driver on next streamon.
+ */
+ if (q->start_streaming_called)
+ __enqueue_in_driver(vb);
+
+ /* Fill buffer information for the userspace */
+ if (pb)
+ call_void_bufop(q, fill_user_buffer, vb, pb);
+
+ /*
+ * If streamon has been called, and we haven't yet called
+ * start_streaming() since not enough buffers were queued, and
+ * we now have reached the minimum number of queued buffers,
+ * then we can finally call start_streaming().
+ */
+ if (q->streaming && !q->start_streaming_called &&
+ q->queued_count >= q->min_buffers_needed) {
+ ret = vb2_start_streaming(q);
+ if (ret) {
+ /*
+ * Since vb2_core_qbuf will return with an error,
+ * we should return the buffer to the DEQUEUED state since
+ * the error indicates that the buffer wasn't queued.
+ */
+ list_del(&vb->queued_entry);
+ q->queued_count--;
+ vb->state = orig_state;
+ return ret;
+ }
+ }
+
+ dprintk(q, 2, "qbuf of buffer %d succeeded\n", vb->index);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_qbuf);
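+
+/*
+ * Usage sketch (illustrative, not part of this file): in-kernel callers
+ * that have no userspace buffer structure to pass, such as the file I/O
+ * emulator later in this file, queue a buffer by index with pb == NULL
+ * and no request:
+ *
+ *     ret = vb2_core_qbuf(q, index, NULL, NULL);
+ *     if (ret)
+ *             return ret;
+ */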
+
+/*
+ * __vb2_wait_for_done_vb() - wait for a buffer to become available
+ * for dequeuing
+ *
+ * Will sleep, if required, when nonblocking == false.
+ */
+static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
+{
+ /*
+ * All operations on vb_done_list are performed under done_lock
+ * spinlock protection. However, buffers may be removed from
+ * it and returned to userspace only while holding both driver's
+ * lock and the done_lock spinlock. Thus we can be sure that as
+ * long as we hold the driver's lock, the list will remain not
+ * empty if list_empty() check succeeds.
+ */
+
+ for (;;) {
+ int ret;
+
+ if (q->waiting_in_dqbuf) {
+ dprintk(q, 1, "another dup()ped fd is waiting for a buffer\n");
+ return -EBUSY;
+ }
+
+ if (!q->streaming) {
+ dprintk(q, 1, "streaming off, will not wait for buffers\n");
+ return -EINVAL;
+ }
+
+ if (q->error) {
+ dprintk(q, 1, "Queue in error state, will not wait for buffers\n");
+ return -EIO;
+ }
+
+ if (q->last_buffer_dequeued) {
+ dprintk(q, 3, "last buffer dequeued already, will not wait for buffers\n");
+ return -EPIPE;
+ }
+
+ if (!list_empty(&q->done_list)) {
+ /*
+ * Found a buffer that we were waiting for.
+ */
+ break;
+ }
+
+ if (nonblocking) {
+ dprintk(q, 3, "nonblocking and no buffers to dequeue, will not wait\n");
+ return -EAGAIN;
+ }
+
+ q->waiting_in_dqbuf = 1;
+ /*
+ * We are streaming and blocking, wait for another buffer to
+ * become ready or for streamoff. Driver's lock is released to
+ * allow streamoff or qbuf to be called while waiting.
+ */
+ call_void_qop(q, wait_prepare, q);
+
+ /*
+ * All locks have been released, it is safe to sleep now.
+ */
+ dprintk(q, 3, "will sleep waiting for buffers\n");
+ ret = wait_event_interruptible(q->done_wq,
+ !list_empty(&q->done_list) || !q->streaming ||
+ q->error);
+
+ /*
+ * We need to reevaluate both conditions again after reacquiring
+ * the locks or return an error if one occurred.
+ */
+ call_void_qop(q, wait_finish, q);
+ q->waiting_in_dqbuf = 0;
+ if (ret) {
+ dprintk(q, 1, "sleep was interrupted\n");
+ return ret;
+ }
+ }
+ return 0;
+}
+
+/*
+ * __vb2_get_done_vb() - get a buffer ready for dequeuing
+ *
+ * Will sleep, if required, when nonblocking == false.
+ */
+static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
+ void *pb, int nonblocking)
+{
+ unsigned long flags;
+ int ret = 0;
+
+ /*
+ * Wait for at least one buffer to become available on the done_list.
+ */
+ ret = __vb2_wait_for_done_vb(q, nonblocking);
+ if (ret)
+ return ret;
+
+ /*
+ * Driver's lock has been held since we last verified that done_list
+ * is not empty, so no need for another list_empty(done_list) check.
+ */
+ spin_lock_irqsave(&q->done_lock, flags);
+ *vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
+ /*
+ * Only remove the buffer from done_list if all planes can be
+ * handled. Some cases such as V4L2 file I/O and DVB have pb
+ * == NULL; skip the check then as there's nothing to verify.
+ */
+ if (pb)
+ ret = call_bufop(q, verify_planes_array, *vb, pb);
+ if (!ret)
+ list_del(&(*vb)->done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ return ret;
+}
+
+int vb2_wait_for_all_buffers(struct vb2_queue *q)
+{
+ if (!q->streaming) {
+ dprintk(q, 1, "streaming off, will not wait for buffers\n");
+ return -EINVAL;
+ }
+
+ if (q->start_streaming_called)
+ wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
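+
+/*
+ * Usage sketch (hypothetical driver; my_dev and my_hw_cancel_all are
+ * assumed names): a driver whose hardware completes buffers
+ * asynchronously can wait in its stop_streaming() callback until every
+ * buffer it owns has been returned via vb2_buffer_done():
+ *
+ *     static void my_stop_streaming(struct vb2_queue *q)
+ *     {
+ *             struct my_dev *dev = vb2_get_drv_priv(q);
+ *
+ *             my_hw_cancel_all(dev);
+ *             vb2_wait_for_all_buffers(q);
+ *     }
+ */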
+
+/*
+ * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
+ */
+static void __vb2_dqbuf(struct vb2_buffer *vb)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+
+ /* nothing to do if the buffer is already dequeued */
+ if (vb->state == VB2_BUF_STATE_DEQUEUED)
+ return;
+
+ vb->state = VB2_BUF_STATE_DEQUEUED;
+
+ call_void_bufop(q, init_buffer, vb);
+}
+
+int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
+ bool nonblocking)
+{
+ struct vb2_buffer *vb = NULL;
+ int ret;
+
+ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
+ if (ret < 0)
+ return ret;
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_DONE:
+ dprintk(q, 3, "returning done buffer\n");
+ break;
+ case VB2_BUF_STATE_ERROR:
+ dprintk(q, 3, "returning done buffer with errors\n");
+ break;
+ default:
+ dprintk(q, 1, "invalid buffer state %s\n",
+ vb2_state_name(vb->state));
+ return -EINVAL;
+ }
+
+ call_void_vb_qop(vb, buf_finish, vb);
+ vb->prepared = 0;
+
+ if (pindex)
+ *pindex = vb->index;
+
+ /* Fill buffer information for the userspace */
+ if (pb)
+ call_void_bufop(q, fill_user_buffer, vb, pb);
+
+ /* Remove from vb2 queue */
+ list_del(&vb->queued_entry);
+ q->queued_count--;
+
+ trace_vb2_dqbuf(q, vb);
+
+ /* go back to dequeued state */
+ __vb2_dqbuf(vb);
+
+ if (WARN_ON(vb->req_obj.req)) {
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+ if (vb->request)
+ media_request_put(vb->request);
+ vb->request = NULL;
+
+ dprintk(q, 2, "dqbuf of buffer %d, state: %s\n",
+ vb->index, vb2_state_name(vb->state));
+
+ return 0;
+
+}
+EXPORT_SYMBOL_GPL(vb2_core_dqbuf);
+
+/*
+ * __vb2_queue_cancel() - cancel and stop (pause) streaming
+ *
+ * Removes all queued buffers from the driver's queue and all buffers queued
+ * by userspace from vb2's queue. Returns the queue to the state it had right
+ * after reqbufs.
+ */
+static void __vb2_queue_cancel(struct vb2_queue *q)
+{
+ unsigned int i;
+
+ /*
+ * Tell driver to stop all transactions and release all queued
+ * buffers.
+ */
+ if (q->start_streaming_called)
+ call_void_qop(q, stop_streaming, q);
+
+ /*
+ * If you see this warning, then the driver isn't cleaning up properly
+ * in stop_streaming(). See the stop_streaming() documentation in
+ * videobuf2-core.h for more information how buffers should be returned
+ * to vb2 in stop_streaming().
+ */
+ if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
+ for (i = 0; i < q->num_buffers; ++i)
+ if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE) {
+ pr_warn("driver bug: stop_streaming operation is leaving buf %p in active state\n",
+ q->bufs[i]);
+ vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
+ }
+ /* Must be zero now */
+ WARN_ON(atomic_read(&q->owned_by_drv_count));
+ }
+
+ q->streaming = 0;
+ q->start_streaming_called = 0;
+ q->queued_count = 0;
+ q->error = 0;
+ q->uses_requests = 0;
+ q->uses_qbuf = 0;
+
+ /*
+ * Remove all buffers from vb2's list...
+ */
+ INIT_LIST_HEAD(&q->queued_list);
+ /*
+ * ...and done list; userspace will not receive any buffers it
+ * has not already dequeued before initiating cancel.
+ */
+ INIT_LIST_HEAD(&q->done_list);
+ atomic_set(&q->owned_by_drv_count, 0);
+ wake_up_all(&q->done_wq);
+
+ /*
+ * Reinitialize all buffers for next use.
+ * Make sure to call buf_finish for any queued buffers. Normally
+ * that's done in dqbuf, but that's not going to happen when we
+ * cancel the whole queue. Note: this code belongs here, not in
+ * __vb2_dqbuf() since in vb2_core_dqbuf() there is a critical
+ * call to __fill_user_buffer() after buf_finish(). That order can't
+ * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
+ */
+ for (i = 0; i < q->num_buffers; ++i) {
+ struct vb2_buffer *vb = q->bufs[i];
+ struct media_request *req = vb->req_obj.req;
+
+ /*
+ * If a request is associated with this buffer, then
+ * call buf_request_complete() to give the driver a chance
+ * to complete() the related request objects. Otherwise
+ * those objects would never complete.
+ */
+ if (req) {
+ enum media_request_state state;
+ unsigned long flags;
+
+ spin_lock_irqsave(&req->lock, flags);
+ state = req->state;
+ spin_unlock_irqrestore(&req->lock, flags);
+
+ if (state == MEDIA_REQUEST_STATE_QUEUED)
+ call_void_vb_qop(vb, buf_request_complete, vb);
+ }
+
+ __vb2_buf_mem_finish(vb);
+
+ if (vb->prepared) {
+ call_void_vb_qop(vb, buf_finish, vb);
+ vb->prepared = 0;
+ }
+ __vb2_dqbuf(vb);
+
+ if (vb->req_obj.req) {
+ media_request_object_unbind(&vb->req_obj);
+ media_request_object_put(&vb->req_obj);
+ }
+ if (vb->request)
+ media_request_put(vb->request);
+ vb->request = NULL;
+ vb->copied_timestamp = 0;
+ }
+}
+
+int vb2_core_streamon(struct vb2_queue *q, unsigned int type)
+{
+ int ret;
+
+ if (type != q->type) {
+ dprintk(q, 1, "invalid stream type\n");
+ return -EINVAL;
+ }
+
+ if (q->streaming) {
+ dprintk(q, 3, "already streaming\n");
+ return 0;
+ }
+
+ if (!q->num_buffers) {
+ dprintk(q, 1, "no buffers have been allocated\n");
+ return -EINVAL;
+ }
+
+ if (q->num_buffers < q->min_buffers_needed) {
+ dprintk(q, 1, "need at least %u allocated buffers\n",
+ q->min_buffers_needed);
+ return -EINVAL;
+ }
+
+ /*
+ * Tell driver to start streaming provided sufficient buffers
+ * are available.
+ */
+ if (q->queued_count >= q->min_buffers_needed) {
+ ret = v4l_vb2q_enable_media_source(q);
+ if (ret)
+ return ret;
+ ret = vb2_start_streaming(q);
+ if (ret)
+ return ret;
+ }
+
+ q->streaming = 1;
+
+ dprintk(q, 3, "successful\n");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_streamon);
+
+void vb2_queue_error(struct vb2_queue *q)
+{
+ q->error = 1;
+
+ wake_up_all(&q->done_wq);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_error);
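+
+/*
+ * Usage sketch (hypothetical driver; my_dev and hw_fault are assumed
+ * names): a driver typically calls vb2_queue_error() from its interrupt
+ * handler when the hardware reports an unrecoverable fault, so that
+ * waiters in dqbuf and poll() see -EIO/EPOLLERR instead of blocking
+ * forever:
+ *
+ *     static irqreturn_t my_irq(int irq, void *priv)
+ *     {
+ *             struct my_dev *dev = priv;
+ *
+ *             if (hw_fault(dev))
+ *                     vb2_queue_error(&dev->queue);
+ *             return IRQ_HANDLED;
+ *     }
+ */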
+
+int vb2_core_streamoff(struct vb2_queue *q, unsigned int type)
+{
+ if (type != q->type) {
+ dprintk(q, 1, "invalid stream type\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Cancel will pause streaming and remove all buffers from the driver
+ * and vb2, effectively returning control over them to userspace.
+ *
+ * Note that we do this even if q->streaming == 0: if you prepare or
+ * queue buffers, and then call streamoff without ever having called
+ * streamon, you would still expect those buffers to be returned to
+ * their normal dequeued state.
+ */
+ __vb2_queue_cancel(q);
+ q->waiting_for_buffers = !q->is_output;
+ q->last_buffer_dequeued = false;
+
+ dprintk(q, 3, "successful\n");
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_streamoff);
+
+/*
+ * __find_plane_by_offset() - find plane associated with the given offset off
+ */
+static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
+ unsigned int *_buffer, unsigned int *_plane)
+{
+ struct vb2_buffer *vb;
+ unsigned int buffer, plane;
+
+ /*
+ * Sanity checks to ensure the lock is held, MEMORY_MMAP is
+ * used and fileio isn't active.
+ */
+ lockdep_assert_held(&q->mmap_lock);
+
+ if (q->memory != VB2_MEMORY_MMAP) {
+ dprintk(q, 1, "queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ if (vb2_fileio_is_active(q)) {
+ dprintk(q, 1, "file io in progress\n");
+ return -EBUSY;
+ }
+
+ /*
+ * Go over all buffers and their planes, comparing the given offset
+ * with an offset assigned to each plane. If a match is found,
+ * return its buffer and plane numbers.
+ */
+ for (buffer = 0; buffer < q->num_buffers; ++buffer) {
+ vb = q->bufs[buffer];
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->planes[plane].m.offset == off) {
+ *_buffer = buffer;
+ *_plane = plane;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+int vb2_core_expbuf(struct vb2_queue *q, int *fd, unsigned int type,
+ unsigned int index, unsigned int plane, unsigned int flags)
+{
+ struct vb2_buffer *vb = NULL;
+ struct vb2_plane *vb_plane;
+ int ret;
+ struct dma_buf *dbuf;
+
+ if (q->memory != VB2_MEMORY_MMAP) {
+ dprintk(q, 1, "queue is not currently set up for mmap\n");
+ return -EINVAL;
+ }
+
+ if (!q->mem_ops->get_dmabuf) {
+ dprintk(q, 1, "queue does not support DMA buffer exporting\n");
+ return -EINVAL;
+ }
+
+ if (flags & ~(O_CLOEXEC | O_ACCMODE)) {
+ dprintk(q, 1, "queue does support only O_CLOEXEC and access mode flags\n");
+ return -EINVAL;
+ }
+
+ if (type != q->type) {
+ dprintk(q, 1, "invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ if (index >= q->num_buffers) {
+ dprintk(q, 1, "buffer index out of range\n");
+ return -EINVAL;
+ }
+
+ vb = q->bufs[index];
+
+ if (plane >= vb->num_planes) {
+ dprintk(q, 1, "buffer plane out of range\n");
+ return -EINVAL;
+ }
+
+ if (vb2_fileio_is_active(q)) {
+ dprintk(q, 1, "expbuf: file io in progress\n");
+ return -EBUSY;
+ }
+
+ vb_plane = &vb->planes[plane];
+
+ dbuf = call_ptr_memop(get_dmabuf,
+ vb,
+ vb_plane->mem_priv,
+ flags & O_ACCMODE);
+ if (IS_ERR_OR_NULL(dbuf)) {
+ dprintk(q, 1, "failed to export buffer %d, plane %d\n",
+ index, plane);
+ return -EINVAL;
+ }
+
+ ret = dma_buf_fd(dbuf, flags & ~O_ACCMODE);
+ if (ret < 0) {
+ dprintk(q, 3, "buffer %d, plane %d failed to export (%d)\n",
+ index, plane, ret);
+ dma_buf_put(dbuf);
+ return ret;
+ }
+
+ dprintk(q, 3, "buffer %d, plane %d exported as %d descriptor\n",
+ index, plane, ret);
+ *fd = ret;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_expbuf);
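+
+/*
+ * Usage sketch (illustrative, not part of this file): the V4L2 layer
+ * implements VIDIOC_EXPBUF on top of this helper, passing the values
+ * from struct v4l2_exportbuffer through, roughly:
+ *
+ *     int fd;
+ *
+ *     ret = vb2_core_expbuf(q, &fd, q->type, index, plane,
+ *                           O_RDWR | O_CLOEXEC);
+ */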
+
+int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
+{
+ unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
+ struct vb2_buffer *vb;
+ unsigned int buffer = 0, plane = 0;
+ int ret;
+ unsigned long length;
+
+ /*
+ * Check memory area access mode.
+ */
+ if (!(vma->vm_flags & VM_SHARED)) {
+ dprintk(q, 1, "invalid vma flags, VM_SHARED needed\n");
+ return -EINVAL;
+ }
+ if (q->is_output) {
+ if (!(vma->vm_flags & VM_WRITE)) {
+ dprintk(q, 1, "invalid vma flags, VM_WRITE needed\n");
+ return -EINVAL;
+ }
+ } else {
+ if (!(vma->vm_flags & VM_READ)) {
+ dprintk(q, 1, "invalid vma flags, VM_READ needed\n");
+ return -EINVAL;
+ }
+ }
+
+ mutex_lock(&q->mmap_lock);
+
+ /*
+ * Find the plane corresponding to the offset passed by userspace. This
+ * will return an error if not MEMORY_MMAP or file I/O is in progress.
+ */
+ ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ if (ret)
+ goto unlock;
+
+ vb = q->bufs[buffer];
+
+ /*
+ * MMAP requires page_aligned buffers.
+ * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
+ * so we need to do the same here.
+ */
+ length = PAGE_ALIGN(vb->planes[plane].length);
+ if (length < (vma->vm_end - vma->vm_start)) {
+ dprintk(q, 1,
+ "MMAP invalid, as it would overflow buffer length\n");
+ ret = -EINVAL;
+ goto unlock;
+ }
+
+ /*
+ * vm_pgoff is treated in V4L2 API as a 'cookie' to select a buffer,
+ * not as an in-buffer offset. We always want to mmap a whole buffer
+ * from its beginning.
+ */
+ vma->vm_pgoff = 0;
+
+ ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
+
+unlock:
+ mutex_unlock(&q->mmap_lock);
+ if (ret)
+ return ret;
+
+ dprintk(q, 3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_mmap);
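+
+/*
+ * Usage sketch (hypothetical driver; my_dev is an assumed name):
+ * drivers normally reach this through the V4L2 file-operation helpers,
+ * which boil down to calling it with the device's queue:
+ *
+ *     static int my_mmap(struct file *file, struct vm_area_struct *vma)
+ *     {
+ *             struct my_dev *dev = video_drvdata(file);
+ *
+ *             return vb2_mmap(&dev->queue, vma);
+ *     }
+ */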
+
+#ifndef CONFIG_MMU
+unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
+ unsigned long addr,
+ unsigned long len,
+ unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long off = pgoff << PAGE_SHIFT;
+ struct vb2_buffer *vb;
+ unsigned int buffer, plane;
+ void *vaddr;
+ int ret;
+
+ mutex_lock(&q->mmap_lock);
+
+ /*
+ * Find the plane corresponding to the offset passed by userspace. This
+ * will return an error if not MEMORY_MMAP or file I/O is in progress.
+ */
+ ret = __find_plane_by_offset(q, off, &buffer, &plane);
+ if (ret)
+ goto unlock;
+
+ vb = q->bufs[buffer];
+
+ vaddr = vb2_plane_vaddr(vb, plane);
+ mutex_unlock(&q->mmap_lock);
+ return vaddr ? (unsigned long)vaddr : -EINVAL;
+
+unlock:
+ mutex_unlock(&q->mmap_lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
+#endif
+
+int vb2_core_queue_init(struct vb2_queue *q)
+{
+ /*
+ * Sanity check
+ */
+ if (WARN_ON(!q) ||
+ WARN_ON(!q->ops) ||
+ WARN_ON(!q->mem_ops) ||
+ WARN_ON(!q->type) ||
+ WARN_ON(!q->io_modes) ||
+ WARN_ON(!q->ops->queue_setup) ||
+ WARN_ON(!q->ops->buf_queue))
+ return -EINVAL;
+
+ if (WARN_ON(q->requires_requests && !q->supports_requests))
+ return -EINVAL;
+
+ /*
+ * This combination is not allowed since a non-zero value of
+ * q->min_buffers_needed can cause vb2_core_qbuf() to fail if
+ * it has to call start_streaming(), and the Request API expects
+ * that queueing a request (and thus queueing a buffer contained
+ * in that request) will always succeed. There is no method of
+ * propagating an error back to userspace.
+ */
+ if (WARN_ON(q->supports_requests && q->min_buffers_needed))
+ return -EINVAL;
+
+ INIT_LIST_HEAD(&q->queued_list);
+ INIT_LIST_HEAD(&q->done_list);
+ spin_lock_init(&q->done_lock);
+ mutex_init(&q->mmap_lock);
+ init_waitqueue_head(&q->done_wq);
+
+ q->memory = VB2_MEMORY_UNKNOWN;
+
+ if (q->buf_struct_size == 0)
+ q->buf_struct_size = sizeof(struct vb2_buffer);
+
+ if (q->bidirectional)
+ q->dma_dir = DMA_BIDIRECTIONAL;
+ else
+ q->dma_dir = q->is_output ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+ if (q->name[0] == '\0')
+ snprintf(q->name, sizeof(q->name), "%s-%p",
+ q->is_output ? "out" : "cap", q);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_queue_init);
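+
+/*
+ * Usage sketch (hypothetical driver; my_qops and struct my_buffer are
+ * assumed names): a minimal queue setup that satisfies the sanity
+ * checks above, usually filled in before vb2_queue_init() in the V4L2
+ * layer calls down to this function:
+ *
+ *     q->type = ...;                  // capture or output buffer type
+ *     q->io_modes = VB2_MMAP | VB2_USERPTR;
+ *     q->ops = &my_qops;              // must provide queue_setup and buf_queue
+ *     q->mem_ops = &vb2_dma_contig_memops;
+ *     q->buf_struct_size = sizeof(struct my_buffer);
+ *     ret = vb2_core_queue_init(q);
+ */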
+
+static int __vb2_init_fileio(struct vb2_queue *q, int read);
+static int __vb2_cleanup_fileio(struct vb2_queue *q);
+void vb2_core_queue_release(struct vb2_queue *q)
+{
+ __vb2_cleanup_fileio(q);
+ __vb2_queue_cancel(q);
+ mutex_lock(&q->mmap_lock);
+ __vb2_queue_free(q, q->num_buffers);
+ mutex_unlock(&q->mmap_lock);
+}
+EXPORT_SYMBOL_GPL(vb2_core_queue_release);
+
+__poll_t vb2_core_poll(struct vb2_queue *q, struct file *file,
+ poll_table *wait)
+{
+ __poll_t req_events = poll_requested_events(wait);
+ struct vb2_buffer *vb = NULL;
+ unsigned long flags;
+
+ /*
+ * poll_wait() MUST be called on the first invocation on all the
+ * potential queues of interest, even if we are not interested in their
+ * events during this first call. Failure to do so will result in the
+ * queue's events being ignored because the poll_table won't be capable
+ * of adding new wait queues thereafter.
+ */
+ poll_wait(file, &q->done_wq, wait);
+
+ if (!q->is_output && !(req_events & (EPOLLIN | EPOLLRDNORM)))
+ return 0;
+ if (q->is_output && !(req_events & (EPOLLOUT | EPOLLWRNORM)))
+ return 0;
+
+ /*
+ * Start file I/O emulator only if streaming API has not been used yet.
+ */
+ if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
+ if (!q->is_output && (q->io_modes & VB2_READ) &&
+ (req_events & (EPOLLIN | EPOLLRDNORM))) {
+ if (__vb2_init_fileio(q, 1))
+ return EPOLLERR;
+ }
+ if (q->is_output && (q->io_modes & VB2_WRITE) &&
+ (req_events & (EPOLLOUT | EPOLLWRNORM))) {
+ if (__vb2_init_fileio(q, 0))
+ return EPOLLERR;
+ /*
+ * Write to OUTPUT queue can be done immediately.
+ */
+ return EPOLLOUT | EPOLLWRNORM;
+ }
+ }
+
+ /*
+ * There is nothing to wait for if the queue isn't streaming, or if the
+ * error flag is set.
+ */
+ if (!vb2_is_streaming(q) || q->error)
+ return EPOLLERR;
+
+ /*
+ * If this quirk is set and QBUF hasn't been called yet then
+ * return EPOLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ * This quirk is set by V4L2 for backwards compatibility reasons.
+ */
+ if (q->quirk_poll_must_check_waiting_for_buffers &&
+ q->waiting_for_buffers && (req_events & (EPOLLIN | EPOLLRDNORM)))
+ return EPOLLERR;
+
+ /*
+ * For output streams you can call write() as long as there are fewer
+ * buffers queued than there are buffers available.
+ */
+ if (q->is_output && q->fileio && q->queued_count < q->num_buffers)
+ return EPOLLOUT | EPOLLWRNORM;
+
+ if (list_empty(&q->done_list)) {
+ /*
+ * If the last buffer was dequeued from a capture queue,
+ * return immediately. DQBUF will return -EPIPE.
+ */
+ if (q->last_buffer_dequeued)
+ return EPOLLIN | EPOLLRDNORM;
+ }
+
+ /*
+ * Take first buffer available for dequeuing.
+ */
+ spin_lock_irqsave(&q->done_lock, flags);
+ if (!list_empty(&q->done_list))
+ vb = list_first_entry(&q->done_list, struct vb2_buffer,
+ done_entry);
+ spin_unlock_irqrestore(&q->done_lock, flags);
+
+ if (vb && (vb->state == VB2_BUF_STATE_DONE
+ || vb->state == VB2_BUF_STATE_ERROR)) {
+ return (q->is_output) ?
+ EPOLLOUT | EPOLLWRNORM :
+ EPOLLIN | EPOLLRDNORM;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_core_poll);
+
+/*
+ * struct vb2_fileio_buf - buffer context used by file io emulator
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of the streaming API. This structure is used for
+ * tracking context related to the buffers.
+ */
+struct vb2_fileio_buf {
+ void *vaddr;
+ unsigned int size;
+ unsigned int pos;
+ unsigned int queued:1;
+};
+
+/*
+ * struct vb2_fileio_data - queue context used by file io emulator
+ *
+ * @cur_index: the index of the buffer currently being read from or
+ * written to. If equal to q->num_buffers then a new buffer
+ * must be dequeued.
+ * @initial_index: in the read() case all buffers are queued up immediately
+ * in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
+ * buffers. However, in the write() case no buffers are initially
+ * queued, instead whenever a buffer is full it is queued up by
+ * __vb2_perform_fileio(). Only once all available buffers have
+ * been queued up will __vb2_perform_fileio() start to dequeue
+ * buffers. This means that initially __vb2_perform_fileio()
+ * needs to know what buffer index to use when it is queuing up
+ * the buffers for the first time. That initial index is stored
+ * in this field. Once it is equal to q->num_buffers all
+ * available buffers have been queued and __vb2_perform_fileio()
+ * should start the normal dequeue/queue cycle.
+ *
+ * vb2 provides a compatibility layer and emulator of file io (read and
+ * write) calls on top of the streaming API. For proper operation it
+ * requires this structure to save the driver state between calls to
+ * the read or write function.
+ */
+struct vb2_fileio_data {
+ unsigned int count;
+ unsigned int type;
+ unsigned int memory;
+ struct vb2_fileio_buf bufs[VB2_MAX_FRAME];
+ unsigned int cur_index;
+ unsigned int initial_index;
+ unsigned int q_count;
+ unsigned int dq_count;
+ unsigned read_once:1;
+ unsigned write_immediately:1;
+};
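+
+/*
+ * Worked example of the index fields above for write() with 4 buffers:
+ * cur_index and initial_index both start at 0. Each time a buffer fills
+ * up it is queued and both advance (1, 2, 3, 4). Once initial_index ==
+ * 4 == q->num_buffers every buffer has been queued once, cur_index is
+ * reset to q->num_buffers after each qbuf, and __vb2_perform_fileio()
+ * must dequeue a buffer before the next copy.
+ */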
+
+/*
+ * __vb2_init_fileio() - initialize file io emulator
+ * @q: videobuf2 queue
+ * @read: mode selector (1 means read, 0 means write)
+ */
+static int __vb2_init_fileio(struct vb2_queue *q, int read)
+{
+ struct vb2_fileio_data *fileio;
+ int i, ret;
+ unsigned int count = 0;
+
+ /*
+ * Sanity check
+ */
+ if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
+ (!read && !(q->io_modes & VB2_WRITE))))
+ return -EINVAL;
+
+ /*
+ * Check if device supports mapping buffers to kernel virtual space.
+ */
+ if (!q->mem_ops->vaddr)
+ return -EBUSY;
+
+ /*
+ * Check that the streaming API has not already been activated.
+ */
+ if (q->streaming || q->num_buffers > 0)
+ return -EBUSY;
+
+ /*
+ * Start with count 1, driver can increase it in queue_setup()
+ */
+ count = 1;
+
+ dprintk(q, 3, "setting up file io: mode %s, count %d, read_once %d, write_immediately %d\n",
+ (read) ? "read" : "write", count, q->fileio_read_once,
+ q->fileio_write_immediately);
+
+ fileio = kzalloc(sizeof(*fileio), GFP_KERNEL);
+ if (fileio == NULL)
+ return -ENOMEM;
+
+ fileio->read_once = q->fileio_read_once;
+ fileio->write_immediately = q->fileio_write_immediately;
+
+ /*
+ * Request buffers and use MMAP type to force driver
+ * to allocate buffers by itself.
+ */
+ fileio->count = count;
+ fileio->memory = VB2_MEMORY_MMAP;
+ fileio->type = q->type;
+ q->fileio = fileio;
+ ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
+ if (ret)
+ goto err_kfree;
+
+ /*
+ * Check if plane_count is correct
+ * (multiplane buffers are not supported).
+ */
+ if (q->bufs[0]->num_planes != 1) {
+ ret = -EBUSY;
+ goto err_reqbufs;
+ }
+
+ /*
+ * Get kernel address of each buffer.
+ */
+ for (i = 0; i < q->num_buffers; i++) {
+ fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
+ if (fileio->bufs[i].vaddr == NULL) {
+ ret = -EINVAL;
+ goto err_reqbufs;
+ }
+ fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
+ }
+
+ /*
+ * Read mode requires pre queuing of all buffers.
+ */
+ if (read) {
+ /*
+ * Queue all buffers.
+ */
+ for (i = 0; i < q->num_buffers; i++) {
+ ret = vb2_core_qbuf(q, i, NULL, NULL);
+ if (ret)
+ goto err_reqbufs;
+ fileio->bufs[i].queued = 1;
+ }
+ /*
+ * All buffers have been queued, so mark that by setting
+ * initial_index to q->num_buffers
+ */
+ fileio->initial_index = q->num_buffers;
+ fileio->cur_index = q->num_buffers;
+ }
+
+ /*
+ * Start streaming.
+ */
+ ret = vb2_core_streamon(q, q->type);
+ if (ret)
+ goto err_reqbufs;
+
+ return ret;
+
+err_reqbufs:
+ fileio->count = 0;
+ vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
+
+err_kfree:
+ q->fileio = NULL;
+ kfree(fileio);
+ return ret;
+}
+
+/*
+ * __vb2_cleanup_fileio() - free resources used by the file io emulator
+ * @q: videobuf2 queue
+ */
+static int __vb2_cleanup_fileio(struct vb2_queue *q)
+{
+ struct vb2_fileio_data *fileio = q->fileio;
+
+ if (fileio) {
+ vb2_core_streamoff(q, q->type);
+ q->fileio = NULL;
+ fileio->count = 0;
+ vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count);
+ kfree(fileio);
+ dprintk(q, 3, "file io emulator closed\n");
+ }
+ return 0;
+}
+
+/*
+ * __vb2_perform_fileio() - perform a single file io (read or write) operation
+ * @q: videobuf2 queue
+ * @data: pointer to the target userspace buffer
+ * @count: number of bytes to read or write
+ * @ppos: file handle position tracking pointer
+ * @nonblock: mode selector (1 means nonblocking, 0 means blocking)
+ * @read: access mode selector (1 means read, 0 means write)
+ */
+static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblock, int read)
+{
+ struct vb2_fileio_data *fileio;
+ struct vb2_fileio_buf *buf;
+ bool is_multiplanar = q->is_multiplanar;
+ /*
+ * When using write() to write data to an output video node the vb2 core
+ * should copy timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
+ * else is able to provide this information with the write() operation.
+ */
+ bool copy_timestamp = !read && q->copy_timestamp;
+ unsigned index;
+ int ret;
+
+ dprintk(q, 3, "mode %s, offset %ld, count %zd, %sblocking\n",
+ read ? "read" : "write", (long)*ppos, count,
+ nonblock ? "non" : "");
+
+ if (!data)
+ return -EINVAL;
+
+ if (q->waiting_in_dqbuf) {
+ dprintk(q, 3, "another dup()ped fd is %s\n",
+ read ? "reading" : "writing");
+ return -EBUSY;
+ }
+
+ /*
+ * Initialize emulator on first call.
+ */
+ if (!vb2_fileio_is_active(q)) {
+ ret = __vb2_init_fileio(q, read);
+ dprintk(q, 3, "vb2_init_fileio result: %d\n", ret);
+ if (ret)
+ return ret;
+ }
+ fileio = q->fileio;
+
+ /*
+ * Check if we need to dequeue the buffer.
+ */
+ index = fileio->cur_index;
+ if (index >= q->num_buffers) {
+ struct vb2_buffer *b;
+
+ /*
+ * Call vb2_dqbuf to get buffer back.
+ */
+ ret = vb2_core_dqbuf(q, &index, NULL, nonblock);
+ dprintk(q, 5, "vb2_dqbuf result: %d\n", ret);
+ if (ret)
+ return ret;
+ fileio->dq_count += 1;
+
+ fileio->cur_index = index;
+ buf = &fileio->bufs[index];
+ b = q->bufs[index];
+
+ /*
+ * Get number of bytes filled by the driver
+ */
+ buf->pos = 0;
+ buf->queued = 0;
+ buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
+ : vb2_plane_size(q->bufs[index], 0);
+ /* Compensate for data_offset on read in the multiplanar case. */
+ if (is_multiplanar && read &&
+ b->planes[0].data_offset < buf->size) {
+ buf->pos = b->planes[0].data_offset;
+ buf->size -= buf->pos;
+ }
+ } else {
+ buf = &fileio->bufs[index];
+ }
+
+ /*
+ * Limit count on last few bytes of the buffer.
+ */
+ if (buf->pos + count > buf->size) {
+ count = buf->size - buf->pos;
+ dprintk(q, 5, "reducing read count: %zd\n", count);
+ }
+
+ /*
+ * Transfer data to userspace.
+ */
+ dprintk(q, 3, "copying %zd bytes - buffer %d, offset %u\n",
+ count, index, buf->pos);
+ if (read)
+ ret = copy_to_user(data, buf->vaddr + buf->pos, count);
+ else
+ ret = copy_from_user(buf->vaddr + buf->pos, data, count);
+ if (ret) {
+ dprintk(q, 3, "error copying data\n");
+ return -EFAULT;
+ }
+
+ /*
+ * Update counters.
+ */
+ buf->pos += count;
+ *ppos += count;
+
+ /*
+ * Queue next buffer if required.
+ */
+ if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
+ struct vb2_buffer *b = q->bufs[index];
+
+ /*
+ * Check if this is the last buffer to read.
+ */
+ if (read && fileio->read_once && fileio->dq_count == 1) {
+ dprintk(q, 3, "read limit reached\n");
+ return __vb2_cleanup_fileio(q);
+ }
+
+ /*
+ * Call vb2_qbuf and give buffer to the driver.
+ */
+ b->planes[0].bytesused = buf->pos;
+
+ if (copy_timestamp)
+ b->timestamp = ktime_get_ns();
+ ret = vb2_core_qbuf(q, index, NULL, NULL);
+ dprintk(q, 5, "vb2_dbuf result: %d\n", ret);
+ if (ret)
+ return ret;
+
+ /*
+ * Buffer has been queued, update the status
+ */
+ buf->pos = 0;
+ buf->queued = 1;
+ buf->size = vb2_plane_size(q->bufs[index], 0);
+ fileio->q_count += 1;
+ /*
+ * If we are queuing up buffers for the first time, then
+ * increase initial_index by one.
+ */
+ if (fileio->initial_index < q->num_buffers)
+ fileio->initial_index++;
+ /*
+ * The next buffer to use is either a buffer that's going to be
+ * queued for the first time (initial_index < q->num_buffers)
+ * or it is equal to q->num_buffers, meaning that next time
+ * we will need to dequeue a buffer, since we've now queued
+ * up all the 'first time' buffers.
+ */
+ fileio->cur_index = fileio->initial_index;
+ }
+
+ /*
+ * Return proper number of bytes processed.
+ */
+ if (ret == 0)
+ ret = count;
+ return ret;
+}
+
+size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
+ loff_t *ppos, int nonblocking)
+{
+ return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
+}
+EXPORT_SYMBOL_GPL(vb2_read);
+
+size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
+ loff_t *ppos, int nonblocking)
+{
+ return __vb2_perform_fileio(q, (char __user *) data, count,
+ ppos, nonblocking, 0);
+}
+EXPORT_SYMBOL_GPL(vb2_write);
+
+struct vb2_threadio_data {
+ struct task_struct *thread;
+ vb2_thread_fnc fnc;
+ void *priv;
+ bool stop;
+};
+
+static int vb2_thread(void *data)
+{
+ struct vb2_queue *q = data;
+ struct vb2_threadio_data *threadio = q->threadio;
+ bool copy_timestamp = false;
+ unsigned prequeue = 0;
+ unsigned index = 0;
+ int ret = 0;
+
+ if (q->is_output) {
+ prequeue = q->num_buffers;
+ copy_timestamp = q->copy_timestamp;
+ }
+
+ set_freezable();
+
+ for (;;) {
+ struct vb2_buffer *vb;
+
+ /*
+ * Call vb2_dqbuf to get buffer back.
+ */
+ if (prequeue) {
+ vb = q->bufs[index++];
+ prequeue--;
+ } else {
+ call_void_qop(q, wait_finish, q);
+ if (!threadio->stop)
+ ret = vb2_core_dqbuf(q, &index, NULL, 0);
+ call_void_qop(q, wait_prepare, q);
+ dprintk(q, 5, "file io: vb2_dqbuf result: %d\n", ret);
+ if (!ret)
+ vb = q->bufs[index];
+ }
+ if (ret || threadio->stop)
+ break;
+ try_to_freeze();
+
+ if (vb->state != VB2_BUF_STATE_ERROR)
+ if (threadio->fnc(vb, threadio->priv))
+ break;
+ call_void_qop(q, wait_finish, q);
+ if (copy_timestamp)
+ vb->timestamp = ktime_get_ns();
+ if (!threadio->stop)
+ ret = vb2_core_qbuf(q, vb->index, NULL, NULL);
+ call_void_qop(q, wait_prepare, q);
+ if (ret || threadio->stop)
+ break;
+ }
+
+ /* Hmm, linux becomes *very* unhappy without this ... */
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ schedule();
+ }
+ return 0;
+}
+
+/*
+ * This function should not be used for anything other than the videobuf2-dvb
+ * support. If you think you have another good use case for this, then please
+ * contact the linux-media mailing list first.
+ */
+int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
+ const char *thread_name)
+{
+ struct vb2_threadio_data *threadio;
+ int ret = 0;
+
+ if (q->threadio)
+ return -EBUSY;
+ if (vb2_is_busy(q))
+ return -EBUSY;
+ if (WARN_ON(q->fileio))
+ return -EBUSY;
+
+ threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
+ if (threadio == NULL)
+ return -ENOMEM;
+ threadio->fnc = fnc;
+ threadio->priv = priv;
+
+ ret = __vb2_init_fileio(q, !q->is_output);
+ dprintk(q, 3, "file io: vb2_init_fileio result: %d\n", ret);
+ if (ret)
+ goto nomem;
+ q->threadio = threadio;
+ threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
+ if (IS_ERR(threadio->thread)) {
+ ret = PTR_ERR(threadio->thread);
+ threadio->thread = NULL;
+ goto nothread;
+ }
+ return 0;
+
+nothread:
+ __vb2_cleanup_fileio(q);
+nomem:
+ kfree(threadio);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_start);
+
+int vb2_thread_stop(struct vb2_queue *q)
+{
+ struct vb2_threadio_data *threadio = q->threadio;
+ int err;
+
+ if (threadio == NULL)
+ return 0;
+ threadio->stop = true;
+ /* Wake up all pending sleeps in the thread */
+ vb2_queue_error(q);
+ err = kthread_stop(threadio->thread);
+ __vb2_cleanup_fileio(q);
+ threadio->thread = NULL;
+ kfree(threadio);
+ q->threadio = NULL;
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_thread_stop);
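+
+/*
+ * Usage sketch (illustrative, not part of this file): videobuf2-dvb is
+ * the intended user of the thread helpers. It starts a kernel thread
+ * that feeds every dequeued buffer to a callback, roughly:
+ *
+ *     ret = vb2_thread_start(q, my_fill_fnc, priv, "name");
+ *     ...
+ *     vb2_thread_stop(q);
+ *
+ * where my_fill_fnc(vb, priv) (an assumed name) returns 0 to keep
+ * streaming and nonzero to stop the thread.
+ */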
+
+MODULE_DESCRIPTION("Media buffer core framework");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(DMA_BUF);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
new file mode 100644
index 000000000..678b35971
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c
@@ -0,0 +1,872 @@
+/*
+ * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/dma-buf.h>
+#include <linux/module.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+#include <linux/highmem.h>
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-dma-contig.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_dc_buf {
+ struct device *dev;
+ void *vaddr;
+ unsigned long size;
+ void *cookie;
+ dma_addr_t dma_addr;
+ unsigned long attrs;
+ enum dma_data_direction dma_dir;
+ struct sg_table *dma_sgt;
+ struct frame_vector *vec;
+
+ /* MMAP related */
+ struct vb2_vmarea_handler handler;
+ refcount_t refcount;
+ struct sg_table *sgt_base;
+
+ /* DMABUF related */
+ struct dma_buf_attachment *db_attach;
+
+ struct vb2_buffer *vb;
+ bool non_coherent_mem;
+};
+
+/*********************************************/
+/* scatterlist table functions */
+/*********************************************/
+
+static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
+{
+ struct scatterlist *s;
+ dma_addr_t expected = sg_dma_address(sgt->sgl);
+ unsigned int i;
+ unsigned long size = 0;
+
+ for_each_sgtable_dma_sg(sgt, s, i) {
+ if (sg_dma_address(s) != expected)
+ break;
+ expected += sg_dma_len(s);
+ size += sg_dma_len(s);
+ }
+ return size;
+}
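+
+/*
+ * Example (illustrative): for DMA segments {addr 0x1000, len 0x1000}
+ * and {addr 0x2000, len 0x1000} the segments chain seamlessly and this
+ * returns 0x2000; if the second segment started at 0x3000 instead, the
+ * walk would stop after the first segment and return 0x1000.
+ */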
+
+/*********************************************/
+/* callbacks for all buffers */
+/*********************************************/
+
+static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return &buf->dma_addr;
+}
+
+/*
+ * This function may fail if:
+ *
+ * - dma_buf_vmap() fails
+ * E.g. due to lack of virtual mapping address space, or due to
+ * dmabuf->ops misconfiguration.
+ *
+ * - dma_vmap_noncontiguous() fails
+ * For instance, when requested buffer size is larger than totalram_pages().
+ * Relevant for buffers that use non-coherent memory.
+ *
+ * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
+ * Relevant for buffers that use coherent memory.
+ */
+static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (buf->vaddr)
+ return buf->vaddr;
+
+ if (buf->db_attach) {
+ struct iosys_map map;
+
+ if (!dma_buf_vmap(buf->db_attach->dmabuf, &map))
+ buf->vaddr = map.vaddr;
+
+ return buf->vaddr;
+ }
+
+ if (buf->non_coherent_mem)
+ buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt);
+ return buf->vaddr;
+}
+
+static unsigned int vb2_dc_num_users(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ return refcount_read(&buf->refcount);
+}
+
+static void vb2_dc_prepare(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* This takes care of DMABUF and user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_prepare)
+ return;
+
+ if (!buf->non_coherent_mem)
+ return;
+
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ flush_kernel_vmap_range(buf->vaddr, buf->size);
+
+ /* For both USERPTR and non-coherent MMAP */
+ dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
+}
+
+static void vb2_dc_finish(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ /* This takes care of DMABUF and user-enforced cache sync hint */
+ if (buf->vb->skip_cache_sync_on_finish)
+ return;
+
+ if (!buf->non_coherent_mem)
+ return;
+
+ /* Non-coherent MMAP only */
+ if (buf->vaddr)
+ invalidate_kernel_vmap_range(buf->vaddr, buf->size);
+
+ /* For both USERPTR and non-coherent MMAP */
+ dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
+}
+
+/*********************************************/
+/* callbacks for MMAP buffers */
+/*********************************************/
+
+static void vb2_dc_put(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+
+ if (!refcount_dec_and_test(&buf->refcount))
+ return;
+
+ if (buf->non_coherent_mem) {
+ if (buf->vaddr)
+ dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
+ dma_free_noncontiguous(buf->dev, buf->size,
+ buf->dma_sgt, buf->dma_dir);
+ } else {
+ if (buf->sgt_base) {
+ sg_free_table(buf->sgt_base);
+ kfree(buf->sgt_base);
+ }
+ dma_free_attrs(buf->dev, buf->size, buf->cookie,
+ buf->dma_addr, buf->attrs);
+ }
+ put_device(buf->dev);
+ kfree(buf);
+}
+
+static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->cookie = dma_alloc_attrs(buf->dev,
+ buf->size,
+ &buf->dma_addr,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->cookie)
+ return -ENOMEM;
+
+ if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
+ return 0;
+
+ buf->vaddr = buf->cookie;
+ return 0;
+}
+
+static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
+{
+ struct vb2_queue *q = buf->vb->vb2_queue;
+
+ buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
+ buf->size,
+ buf->dma_dir,
+ GFP_KERNEL | q->gfp_flags,
+ buf->attrs);
+ if (!buf->dma_sgt)
+ return -ENOMEM;
+
+ buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);
+
+ /*
+ * For non-coherent buffers the kernel mapping is created on demand
+ * in vb2_dc_vaddr().
+ */
+ return 0;
+}
+
+static void *vb2_dc_alloc(struct vb2_buffer *vb,
+ struct device *dev,
+ unsigned long size)
+{
+ struct vb2_dc_buf *buf;
+ int ret;
+
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->attrs = vb->vb2_queue->dma_attrs;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
+ buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;
+
+ buf->size = size;
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(dev);
+
+ if (buf->non_coherent_mem)
+ ret = vb2_dc_alloc_non_coherent(buf);
+ else
+ ret = vb2_dc_alloc_coherent(buf);
+
+ if (ret) {
+ dev_err(dev, "dma alloc of size %lu failed\n", size);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_dc_put;
+ buf->handler.arg = buf;
+
+ refcount_set(&buf->refcount, 1);
+
+ return buf;
+}
+
+static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ int ret;
+
+ if (!buf) {
+ printk(KERN_ERR "No buffer to map\n");
+ return -EINVAL;
+ }
+
+ if (buf->non_coherent_mem)
+ ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
+ buf->dma_sgt);
+ else
+ ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
+ buf->size, buf->attrs);
+ if (ret) {
+ pr_err("Remapping memory failed, error: %d\n", ret);
+ return ret;
+ }
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
+ __func__, (unsigned long)buf->dma_addr, vma->vm_start,
+ buf->size);
+
+ return 0;
+}
+
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_dc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dma_dir;
+};
+
+static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *dbuf_attach)
+{
+ struct vb2_dc_attachment *attach;
+ unsigned int i;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt;
+ struct vb2_dc_buf *buf = dbuf->priv;
+ int ret;
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ /* Copy the buf->sgt_base scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return -ENOMEM;
+ }
+
+ rd = buf->sgt_base->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ attach->dma_dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+
+ return 0;
+}
+
+static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
+{
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE)
+ /*
+ * Cache sync can be skipped here, as the vb2_dc memory is
+ * allocated from device coherent memory, which means the
+ * memory locations do not require any explicit cache
+ * maintenance prior or after being used by the device.
+ */
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+ struct vb2_dc_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dma_dir == dma_dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ /*
+ * mapping to the client with new direction, no cache sync
+ * required see comment in vb2_dc_dmabuf_ops_detach()
+ */
+ if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dma_dir = dma_dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_dc_get_dmabuf */
+ vb2_dc_put(dbuf->priv);
+}
+
+static int
+vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
+{
+ return 0;
+}
+
+static int
+vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
+{
+ return 0;
+}
+
+static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
+{
+ struct vb2_dc_buf *buf;
+ void *vaddr;
+
+ buf = dbuf->priv;
+ vaddr = vb2_dc_vaddr(buf->vb, buf);
+ if (!vaddr)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
+}
+
+static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
+{
+ return vb2_dc_mmap(dbuf->priv, vma);
+}
+
+static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
+ .attach = vb2_dc_dmabuf_ops_attach,
+ .detach = vb2_dc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_dc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
+ .begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
+ .end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
+ .vmap = vb2_dc_dmabuf_ops_vmap,
+ .mmap = vb2_dc_dmabuf_ops_mmap,
+ .release = vb2_dc_dmabuf_ops_release,
+};
+
+static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
+{
+ int ret;
+ struct sg_table *sgt;
+
+ if (buf->non_coherent_mem)
+ return buf->dma_sgt;
+
+ sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ dev_err(buf->dev, "failed to alloc sg table\n");
+ return NULL;
+ }
+
+ ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
+ buf->size, buf->attrs);
+ if (ret < 0) {
+ dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
+ kfree(sgt);
+ return NULL;
+ }
+
+ return sgt;
+}
+
+static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_dc_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
+
+ if (!buf->sgt_base)
+ buf->sgt_base = vb2_dc_get_base_sgt(buf);
+
+ if (WARN_ON(!buf->sgt_base))
+ return NULL;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ refcount_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
+/* callbacks for USERPTR buffers */
+/*********************************************/
+
+static void vb2_dc_put_userptr(void *buf_priv)
+{
+ struct vb2_dc_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+ int i;
+ struct page **pages;
+
+ if (sgt) {
+ /*
+ * No need to sync to CPU, it's already synced to the CPU
+ * since the finish() memop will have been called before this.
+ */
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ pages = frame_vector_pages(buf->vec);
+ /* sgt should exist only if vector contains pages... */
+ BUG_ON(IS_ERR(pages));
+ if (buf->dma_dir == DMA_FROM_DEVICE ||
+ buf->dma_dir == DMA_BIDIRECTIONAL)
+ for (i = 0; i < frame_vector_count(buf->vec); i++)
+ set_page_dirty_lock(pages[i]);
+ sg_free_table(sgt);
+ kfree(sgt);
+ } else {
+ dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
+ buf->dma_dir, 0);
+ }
+ vb2_destroy_framevec(buf->vec);
+ kfree(buf);
+}
+
+static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
+{
+ struct vb2_dc_buf *buf;
+ struct frame_vector *vec;
+ unsigned int offset;
+ int n_pages, i;
+ int ret = 0;
+ struct sg_table *sgt;
+ unsigned long contig_size;
+ unsigned long dma_align = dma_get_cache_alignment();
+
+ /* Only cache-aligned DMA transfers are reliable */
+ if (!IS_ALIGNED(vaddr | size, dma_align)) {
+ pr_debug("user data must be aligned to %lu bytes\n", dma_align);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!size) {
+ pr_debug("size is zero\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = dev;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->vb = vb;
+
+ offset = lower_32_bits(offset_in_page(vaddr));
+ vec = vb2_create_framevec(vaddr, size);
+ if (IS_ERR(vec)) {
+ ret = PTR_ERR(vec);
+ goto fail_buf;
+ }
+ buf->vec = vec;
+ n_pages = frame_vector_count(vec);
+ ret = frame_vector_to_pages(vec);
+ if (ret < 0) {
+ unsigned long *nums = frame_vector_pfns(vec);
+
+ /*
+ * Failed to convert to pages... Check whether the memory is
+ * physically contiguous and use direct mapping.
+ */
+ for (i = 1; i < n_pages; i++)
+ if (nums[i-1] + 1 != nums[i])
+ goto fail_pfnvec;
+ buf->dma_addr = dma_map_resource(buf->dev,
+ __pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
+ if (dma_mapping_error(buf->dev, buf->dma_addr)) {
+ ret = -ENOMEM;
+ goto fail_pfnvec;
+ }
+ goto out;
+ }
+
+ sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
+ if (!sgt) {
+ pr_err("failed to allocate sg table\n");
+ ret = -ENOMEM;
+ goto fail_pfnvec;
+ }
+
+ ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
+ offset, size, GFP_KERNEL);
+ if (ret) {
+ pr_err("failed to initialize sg table\n");
+ goto fail_sgt;
+ }
+
+ /*
+ * No need to sync to the device, this will happen later when the
+ * prepare() memop is called.
+ */
+ if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
+ pr_err("failed to map scatterlist\n");
+ ret = -EIO;
+ goto fail_sgt_init;
+ }
+
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < size) {
+ pr_err("contiguous mapping is too small %lu/%lu\n",
+ contig_size, size);
+ ret = -EFAULT;
+ goto fail_map_sg;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+ buf->dma_sgt = sgt;
+ buf->non_coherent_mem = 1;
+
+out:
+ buf->size = size;
+
+ return buf;
+
+fail_map_sg:
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+
+fail_sgt_init:
+ sg_free_table(sgt);
+
+fail_sgt:
+ kfree(sgt);
+
+fail_pfnvec:
+ vb2_destroy_framevec(vec);
+
+fail_buf:
+ kfree(buf);
+
+ return ERR_PTR(ret);
+}
+
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_dc_map_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt;
+ unsigned long contig_size;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to pin a non attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ /* checking if dmabuf is big enough to store contiguous chunk */
+ contig_size = vb2_dc_get_contiguous_size(sgt);
+ if (contig_size < buf->size) {
+ pr_err("contiguous chunk is too small %lu/%lu\n",
+ contig_size, buf->size);
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+ return -EFAULT;
+ }
+
+ buf->dma_addr = sg_dma_address(sgt->sgl);
+ buf->dma_sgt = sgt;
+ buf->vaddr = NULL;
+
+ return 0;
+}
+
+static void vb2_dc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to unpin a not attached buffer\n");
+ return;
+ }
+
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
+ if (buf->vaddr) {
+ dma_buf_vunmap(buf->db_attach->dmabuf, &map);
+ buf->vaddr = NULL;
+ }
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_addr = 0;
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dc_buf *buf = mem_priv;
+
+ /* if vb2 works correctly you should never detach a mapped buffer */
+ if (WARN_ON(buf->dma_addr))
+ vb2_dc_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+ kfree(buf);
+}
+
+static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+ struct dma_buf *dbuf, unsigned long size)
+{
+ struct vb2_dc_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = dev;
+ buf->vb = vb;
+
+ /* create attachment for the dmabuf with the user device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->size = size;
+ buf->db_attach = dba;
+
+ return buf;
+}
+
+/*********************************************/
+/* DMA CONTIG exported functions */
+/*********************************************/
+
+const struct vb2_mem_ops vb2_dma_contig_memops = {
+ .alloc = vb2_dc_alloc,
+ .put = vb2_dc_put,
+ .get_dmabuf = vb2_dc_get_dmabuf,
+ .cookie = vb2_dc_cookie,
+ .vaddr = vb2_dc_vaddr,
+ .mmap = vb2_dc_mmap,
+ .get_userptr = vb2_dc_get_userptr,
+ .put_userptr = vb2_dc_put_userptr,
+ .prepare = vb2_dc_prepare,
+ .finish = vb2_dc_finish,
+ .map_dmabuf = vb2_dc_map_dmabuf,
+ .unmap_dmabuf = vb2_dc_unmap_dmabuf,
+ .attach_dmabuf = vb2_dc_attach_dmabuf,
+ .detach_dmabuf = vb2_dc_detach_dmabuf,
+ .num_users = vb2_dc_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
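+
+/*
+ * Usage sketch (illustrative, not part of this file): a driver selects
+ * this allocator when setting up its queue and can later obtain the DMA
+ * address of a plane through the cookie op:
+ *
+ *     q->mem_ops = &vb2_dma_contig_memops;
+ *     ...
+ *     dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
+ */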
+
+/**
+ * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
+ * @dev: device for configuring DMA parameters
+ * @size: size of DMA max segment size to set
+ *
+ * To allow mapping the scatter-list into a single chunk in the DMA
+ * address space, the device is required to have the DMA max segment
+ * size parameter set to a value larger than the buffer size. Otherwise,
+ * the DMA-mapping subsystem will split the mapping into max segment
+ * size chunks. This function sets the DMA max segment size
+ * parameter to let DMA-mapping map a buffer as a single chunk in DMA
+ * address space.
+ * This code assumes that the DMA-mapping subsystem will merge all
+ * scatterlist segments if this is really possible (for example when
+ * an IOMMU is available and enabled).
+ * Ideally, this parameter should be set by the generic bus code, but it
+ * is left with the default 64KiB value due to historical limitations in
+ * other subsystems (like limited USB host drivers) and there is no good
+ * place to set it to the proper value.
+ * This function should be called from drivers that are known to operate
+ * on platforms with an IOMMU and provide access to shared buffers
+ * (either USERPTR or DMABUF). This should be done before initializing
+ * the videobuf2 queue.
+ */
+int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
+{
+ if (!dev->dma_parms) {
+ dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
+ return -ENODEV;
+ }
+ if (dma_get_max_seg_size(dev) < size)
+ return dma_set_max_seg_size(dev, size);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
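+
+/*
+ * Usage sketch (hypothetical probe function): drivers on IOMMU-backed
+ * platforms typically raise the limit once at probe time, before any
+ * vb2 queue is initialized:
+ *
+ *     ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
+ *     if (ret)
+ *             return ret;
+ */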
+
+MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(DMA_BUF);
diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
new file mode 100644
index 000000000..cb1e4bc69
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-dma-sg.c
@@ -0,0 +1,686 @@
+/*
+ * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Andrzej Pietrasiewicz <andrzejtp2010@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/refcount.h>
+#include <linux/scatterlist.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-memops.h>
+#include <media/videobuf2-dma-sg.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
+ } while (0)
+
+struct vb2_dma_sg_buf {
+ struct device *dev;
+ void *vaddr;
+ struct page **pages;
+ struct frame_vector *vec;
+ int offset;
+ enum dma_data_direction dma_dir;
+ struct sg_table sg_table;
+ /*
+ * This will point to sg_table when used with the MMAP or USERPTR
+ * memory model, and to the dma_buf sglist when used with the
+ * DMABUF memory model.
+ */
+ struct sg_table *dma_sgt;
+ size_t size;
+ unsigned int num_pages;
+ refcount_t refcount;
+ struct vb2_vmarea_handler handler;
+
+ struct dma_buf_attachment *db_attach;
+
+ struct vb2_buffer *vb;
+};
+
+static void vb2_dma_sg_put(void *buf_priv);
+
+static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
+ gfp_t gfp_flags)
+{
+ unsigned int last_page = 0;
+ unsigned long size = buf->size;
+
+ while (size > 0) {
+ struct page *pages;
+ int order;
+ int i;
+
+ order = get_order(size);
+ /* Don't over-allocate */
+ if ((PAGE_SIZE << order) > size)
+ order--;
+
+ pages = NULL;
+ while (!pages) {
+ pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
+ __GFP_NOWARN | gfp_flags, order);
+ if (pages)
+ break;
+
+ if (order == 0) {
+ while (last_page--)
+ __free_page(buf->pages[last_page]);
+ return -ENOMEM;
+ }
+ order--;
+ }
+
+ split_page(pages, order);
+ for (i = 0; i < (1 << order); i++)
+ buf->pages[last_page++] = &pages[i];
+
+ size -= PAGE_SIZE << order;
+ }
+
+ return 0;
+}
+
+static void *vb2_dma_sg_alloc(struct vb2_buffer *vb, struct device *dev,
+ unsigned long size)
+{
+ struct vb2_dma_sg_buf *buf;
+ struct sg_table *sgt;
+ int ret;
+ int num_pages;
+
+ if (WARN_ON(!dev) || WARN_ON(!size))
+ return ERR_PTR(-EINVAL);
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->vaddr = NULL;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->offset = 0;
+ buf->size = size;
+ /* size is already page aligned */
+ buf->num_pages = size >> PAGE_SHIFT;
+ buf->dma_sgt = &buf->sg_table;
+
+ /*
+ * NOTE: dma-sg allocates memory using the page allocator directly, so
+ * there is no memory consistency guarantee, hence dma-sg ignores DMA
+ * attributes passed from the upper layer.
+ */
+ buf->pages = kvcalloc(buf->num_pages, sizeof(struct page *), GFP_KERNEL);
+ if (!buf->pages)
+ goto fail_pages_array_alloc;
+
+ ret = vb2_dma_sg_alloc_compacted(buf, vb->vb2_queue->gfp_flags);
+ if (ret)
+ goto fail_pages_alloc;
+
+ ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
+ buf->num_pages, 0, size, GFP_KERNEL);
+ if (ret)
+ goto fail_table_alloc;
+
+ /* Prevent the device from being released while the buffer is used */
+ buf->dev = get_device(dev);
+
+ sgt = &buf->sg_table;
+ /*
+ * No need to sync to the device; this will happen later when the
+ * prepare() memop is called.
+ */
+ if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto fail_map;
+
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_dma_sg_put;
+ buf->handler.arg = buf;
+ buf->vb = vb;
+
+ refcount_set(&buf->refcount, 1);
+
+ dprintk(1, "%s: Allocated buffer of %d pages\n",
+ __func__, buf->num_pages);
+ return buf;
+
+fail_map:
+ put_device(buf->dev);
+ sg_free_table(buf->dma_sgt);
+fail_table_alloc:
+ num_pages = buf->num_pages;
+ while (num_pages--)
+ __free_page(buf->pages[num_pages]);
+fail_pages_alloc:
+ kvfree(buf->pages);
+fail_pages_array_alloc:
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+}
+
+static void vb2_dma_sg_put(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = &buf->sg_table;
+ int i = buf->num_pages;
+
+ if (refcount_dec_and_test(&buf->refcount)) {
+ dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
+ buf->num_pages);
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
+ if (buf->vaddr)
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(buf->dma_sgt);
+ while (--i >= 0)
+ __free_page(buf->pages[i]);
+ kvfree(buf->pages);
+ put_device(buf->dev);
+ kfree(buf);
+ }
+}
+
+static void vb2_dma_sg_prepare(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (buf->vb->skip_cache_sync_on_prepare)
+ return;
+
+ dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
+}
+
+static void vb2_dma_sg_finish(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ if (buf->vb->skip_cache_sync_on_finish)
+ return;
+
+ dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
+}
+
+static void *vb2_dma_sg_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
+{
+ struct vb2_dma_sg_buf *buf;
+ struct sg_table *sgt;
+ struct frame_vector *vec;
+
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
+ buf = kzalloc(sizeof *buf, GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->vaddr = NULL;
+ buf->dev = dev;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->offset = vaddr & ~PAGE_MASK;
+ buf->size = size;
+ buf->dma_sgt = &buf->sg_table;
+ buf->vb = vb;
+ vec = vb2_create_framevec(vaddr, size);
+ if (IS_ERR(vec))
+ goto userptr_fail_pfnvec;
+ buf->vec = vec;
+
+ buf->pages = frame_vector_pages(vec);
+ if (IS_ERR(buf->pages))
+ goto userptr_fail_sgtable;
+ buf->num_pages = frame_vector_count(vec);
+
+ if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
+ buf->num_pages, buf->offset, size, 0))
+ goto userptr_fail_sgtable;
+
+ sgt = &buf->sg_table;
+ /*
+ * No need to sync to the device; this will happen later when the
+ * prepare() memop is called.
+ */
+ if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
+ DMA_ATTR_SKIP_CPU_SYNC))
+ goto userptr_fail_map;
+
+ return buf;
+
+userptr_fail_map:
+ sg_free_table(&buf->sg_table);
+userptr_fail_sgtable:
+ vb2_destroy_framevec(vec);
+userptr_fail_pfnvec:
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * @put_userptr: inform the allocator that a USERPTR buffer will no longer
+ * be used
+ */
+static void vb2_dma_sg_put_userptr(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct sg_table *sgt = &buf->sg_table;
+ int i = buf->num_pages;
+
+ dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
+ __func__, buf->num_pages);
+ dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
+ if (buf->vaddr)
+ vm_unmap_ram(buf->vaddr, buf->num_pages);
+ sg_free_table(buf->dma_sgt);
+ if (buf->dma_dir == DMA_FROM_DEVICE ||
+ buf->dma_dir == DMA_BIDIRECTIONAL)
+ while (--i >= 0)
+ set_page_dirty_lock(buf->pages[i]);
+ vb2_destroy_framevec(buf->vec);
+ kfree(buf);
+}
+
+static void *vb2_dma_sg_vaddr(struct vb2_buffer *vb, void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct iosys_map map;
+ int ret;
+
+ BUG_ON(!buf);
+
+ if (!buf->vaddr) {
+ if (buf->db_attach) {
+ ret = dma_buf_vmap(buf->db_attach->dmabuf, &map);
+ buf->vaddr = ret ? NULL : map.vaddr;
+ } else {
+ buf->vaddr = vm_map_ram(buf->pages, buf->num_pages, -1);
+ }
+ }
+
+ /* add offset in case userptr is not page-aligned */
+ return buf->vaddr ? buf->vaddr + buf->offset : NULL;
+}
+
+static unsigned int vb2_dma_sg_num_users(void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+
+ return refcount_read(&buf->refcount);
+}
+
+static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ int err;
+
+ if (!buf) {
+ printk(KERN_ERR "No memory to map\n");
+ return -EINVAL;
+ }
+
+ err = vm_map_pages(vma, buf->pages, buf->num_pages);
+ if (err) {
+ printk(KERN_ERR "Remapping memory, error: %d\n", err);
+ return err;
+ }
+
+ /*
+ * Use common vm_area operations to track buffer refcount.
+ */
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ return 0;
+}
+
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_dma_sg_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dma_dir;
+};
+
+static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *dbuf_attach)
+{
+ struct vb2_dma_sg_attachment *attach;
+ unsigned int i;
+ struct scatterlist *rd, *wr;
+ struct sg_table *sgt;
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+ int ret;
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ /* Copy the buf->dma_sgt scatter list to the attachment, as we can't
+ * map the same scatter list to multiple attachments at the same time.
+ */
+ ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return -ENOMEM;
+ }
+
+ rd = buf->dma_sgt->sgl;
+ wr = sgt->sgl;
+ for (i = 0; i < sgt->orig_nents; ++i) {
+ sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
+ rd = sg_next(rd);
+ wr = sg_next(wr);
+ }
+
+ attach->dma_dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+
+ return 0;
+}
+
+static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
+{
+ struct vb2_dma_sg_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE)
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+ struct vb2_dma_sg_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dma_dir == dma_dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dma_dir = dma_dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_dma_sg_get_dmabuf */
+ vb2_dma_sg_put(dbuf->priv);
+}
+
+static int
+vb2_dma_sg_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
+{
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ return 0;
+}
+
+static int
+vb2_dma_sg_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
+ enum dma_data_direction direction)
+{
+ struct vb2_dma_sg_buf *buf = dbuf->priv;
+ struct sg_table *sgt = buf->dma_sgt;
+
+ dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
+ return 0;
+}
+
+static int vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf,
+ struct iosys_map *map)
+{
+ struct vb2_dma_sg_buf *buf;
+ void *vaddr;
+
+ buf = dbuf->priv;
+ vaddr = vb2_dma_sg_vaddr(buf->vb, buf);
+ if (!vaddr)
+ return -EINVAL;
+
+ iosys_map_set_vaddr(map, vaddr);
+
+ return 0;
+}
+
+static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
+{
+ return vb2_dma_sg_mmap(dbuf->priv, vma);
+}
+
+static const struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
+ .attach = vb2_dma_sg_dmabuf_ops_attach,
+ .detach = vb2_dma_sg_dmabuf_ops_detach,
+ .map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
+ .begin_cpu_access = vb2_dma_sg_dmabuf_ops_begin_cpu_access,
+ .end_cpu_access = vb2_dma_sg_dmabuf_ops_end_cpu_access,
+ .vmap = vb2_dma_sg_dmabuf_ops_vmap,
+ .mmap = vb2_dma_sg_dmabuf_ops_mmap,
+ .release = vb2_dma_sg_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_dma_sg_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_dma_sg_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
+
+ if (WARN_ON(!buf->dma_sgt))
+ return NULL;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ refcount_inc(&buf->refcount);
+
+ return dbuf;
+}
+
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_dma_sg_map_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+ struct sg_table *sgt;
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to pin a non attached buffer\n");
+ return -EINVAL;
+ }
+
+ if (WARN_ON(buf->dma_sgt)) {
+ pr_err("dmabuf buffer is already pinned\n");
+ return 0;
+ }
+
+ /* get the associated scatterlist for this buffer */
+ sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
+ if (IS_ERR(sgt)) {
+ pr_err("Error getting dmabuf scatterlist\n");
+ return -EINVAL;
+ }
+
+ buf->dma_sgt = sgt;
+ buf->vaddr = NULL;
+
+ return 0;
+}
+
+static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+ struct sg_table *sgt = buf->dma_sgt;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
+
+ if (WARN_ON(!buf->db_attach)) {
+ pr_err("trying to unpin a not attached buffer\n");
+ return;
+ }
+
+ if (WARN_ON(!sgt)) {
+ pr_err("dmabuf buffer is already unpinned\n");
+ return;
+ }
+
+ if (buf->vaddr) {
+ dma_buf_vunmap(buf->db_attach->dmabuf, &map);
+ buf->vaddr = NULL;
+ }
+ dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
+
+ buf->dma_sgt = NULL;
+}
+
+static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_dma_sg_buf *buf = mem_priv;
+
+ /* if vb2 works correctly you should never detach a mapped buffer */
+ if (WARN_ON(buf->dma_sgt))
+ vb2_dma_sg_unmap_dmabuf(buf);
+
+ /* detach this attachment */
+ dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
+ kfree(buf);
+}
+
+static void *vb2_dma_sg_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
+ struct dma_buf *dbuf, unsigned long size)
+{
+ struct vb2_dma_sg_buf *buf;
+ struct dma_buf_attachment *dba;
+
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dev = dev;
+ /* create attachment for the dmabuf with the user device */
+ dba = dma_buf_attach(dbuf, buf->dev);
+ if (IS_ERR(dba)) {
+ pr_err("failed to attach dmabuf\n");
+ kfree(buf);
+ return dba;
+ }
+
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->size = size;
+ buf->db_attach = dba;
+ buf->vb = vb;
+
+ return buf;
+}
+
+static void *vb2_dma_sg_cookie(struct vb2_buffer *vb, void *buf_priv)
+{
+ struct vb2_dma_sg_buf *buf = buf_priv;
+
+ return buf->dma_sgt;
+}
+
+const struct vb2_mem_ops vb2_dma_sg_memops = {
+ .alloc = vb2_dma_sg_alloc,
+ .put = vb2_dma_sg_put,
+ .get_userptr = vb2_dma_sg_get_userptr,
+ .put_userptr = vb2_dma_sg_put_userptr,
+ .prepare = vb2_dma_sg_prepare,
+ .finish = vb2_dma_sg_finish,
+ .vaddr = vb2_dma_sg_vaddr,
+ .mmap = vb2_dma_sg_mmap,
+ .num_users = vb2_dma_sg_num_users,
+ .get_dmabuf = vb2_dma_sg_get_dmabuf,
+ .map_dmabuf = vb2_dma_sg_map_dmabuf,
+ .unmap_dmabuf = vb2_dma_sg_unmap_dmabuf,
+ .attach_dmabuf = vb2_dma_sg_attach_dmabuf,
+ .detach_dmabuf = vb2_dma_sg_detach_dmabuf,
+ .cookie = vb2_dma_sg_cookie,
+};
+EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);
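+
+/*
+ * A minimal wiring sketch (hedged; the "my_" names are hypothetical): a
+ * driver selects this allocator with q->mem_ops = &vb2_dma_sg_memops
+ * before vb2_queue_init(), and can then fetch a buffer's scatter list
+ * via the cookie, e.g. in its buf_queue op:
+ */
+#if 0	/* illustrative only */
+static void my_buf_queue(struct vb2_buffer *vb)
+{
+	/* the cookie of a dma-sg buffer is its struct sg_table */
+	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
+
+	/* ... program the DMA engine from sgt->sgl ... */
+}
+#endif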
+
+MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
+MODULE_AUTHOR("Andrzej Pietrasiewicz");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(DMA_BUF);
diff --git a/drivers/media/common/videobuf2/videobuf2-dvb.c b/drivers/media/common/videobuf2/videobuf2-dvb.c
new file mode 100644
index 000000000..8c15bcd07
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-dvb.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * some helper functions for simple DVB cards which simply DMA the
+ * complete transport stream and let the computer sort everything else
+ * (i.e. we are using the software demux, ...). Also uses vb2
+ * to manage DMA buffers.
+ *
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+
+#include <media/videobuf2-dvb.h>
+
+/* ------------------------------------------------------------------ */
+
+MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
+MODULE_LICENSE("GPL");
+
+/* ------------------------------------------------------------------ */
+
+static int dvb_fnc(struct vb2_buffer *vb, void *priv)
+{
+ struct vb2_dvb *dvb = priv;
+
+ dvb_dmx_swfilter(&dvb->demux, vb2_plane_vaddr(vb, 0),
+ vb2_get_plane_payload(vb, 0));
+ return 0;
+}
+
+static int vb2_dvb_start_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct vb2_dvb *dvb = demux->priv;
+ int rc = 0;
+
+ if (!demux->dmx.frontend)
+ return -EINVAL;
+
+ mutex_lock(&dvb->lock);
+ dvb->nfeeds++;
+
+ if (!dvb->dvbq.threadio) {
+ rc = vb2_thread_start(&dvb->dvbq, dvb_fnc, dvb, dvb->name);
+ if (rc)
+ dvb->nfeeds--;
+ }
+ if (!rc)
+ rc = dvb->nfeeds;
+ mutex_unlock(&dvb->lock);
+ return rc;
+}
+
+static int vb2_dvb_stop_feed(struct dvb_demux_feed *feed)
+{
+ struct dvb_demux *demux = feed->demux;
+ struct vb2_dvb *dvb = demux->priv;
+ int err = 0;
+
+ mutex_lock(&dvb->lock);
+ dvb->nfeeds--;
+ if (0 == dvb->nfeeds)
+ err = vb2_thread_stop(&dvb->dvbq);
+ mutex_unlock(&dvb->lock);
+ return err;
+}
+
+static int vb2_dvb_register_adapter(struct vb2_dvb_frontends *fe,
+ struct module *module,
+ void *adapter_priv,
+ struct device *device,
+ struct media_device *mdev,
+ char *adapter_name,
+ short *adapter_nr,
+ int mfe_shared)
+{
+ int result;
+
+ mutex_init(&fe->lock);
+
+ /* register adapter */
+ result = dvb_register_adapter(&fe->adapter, adapter_name, module,
+ device, adapter_nr);
+ if (result < 0) {
+ pr_warn("%s: dvb_register_adapter failed (errno = %d)\n",
+ adapter_name, result);
+ }
+ fe->adapter.priv = adapter_priv;
+ fe->adapter.mfe_shared = mfe_shared;
+#ifdef CONFIG_MEDIA_CONTROLLER_DVB
+ if (mdev)
+ fe->adapter.mdev = mdev;
+#endif
+ return result;
+}
+
+static int vb2_dvb_register_frontend(struct dvb_adapter *adapter,
+ struct vb2_dvb *dvb)
+{
+ int result;
+
+ /* register frontend */
+ result = dvb_register_frontend(adapter, dvb->frontend);
+ if (result < 0) {
+ pr_warn("%s: dvb_register_frontend failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_frontend;
+ }
+
+ /* register demux stuff */
+ dvb->demux.dmx.capabilities =
+ DMX_TS_FILTERING | DMX_SECTION_FILTERING |
+ DMX_MEMORY_BASED_FILTERING;
+ dvb->demux.priv = dvb;
+ dvb->demux.filternum = 256;
+ dvb->demux.feednum = 256;
+ dvb->demux.start_feed = vb2_dvb_start_feed;
+ dvb->demux.stop_feed = vb2_dvb_stop_feed;
+ result = dvb_dmx_init(&dvb->demux);
+ if (result < 0) {
+ pr_warn("%s: dvb_dmx_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_dmx;
+ }
+
+ dvb->dmxdev.filternum = 256;
+ dvb->dmxdev.demux = &dvb->demux.dmx;
+ dvb->dmxdev.capabilities = 0;
+ result = dvb_dmxdev_init(&dvb->dmxdev, adapter);
+
+ if (result < 0) {
+ pr_warn("%s: dvb_dmxdev_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_dmxdev;
+ }
+
+ dvb->fe_hw.source = DMX_FRONTEND_0;
+ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+ if (result < 0) {
+ pr_warn("%s: add_frontend failed (DMX_FRONTEND_0, errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_hw;
+ }
+
+ dvb->fe_mem.source = DMX_MEMORY_FE;
+ result = dvb->demux.dmx.add_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+ if (result < 0) {
+ pr_warn("%s: add_frontend failed (DMX_MEMORY_FE, errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_mem;
+ }
+
+ result = dvb->demux.dmx.connect_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+ if (result < 0) {
+ pr_warn("%s: connect_frontend failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_conn;
+ }
+
+ /* register network adapter */
+ result = dvb_net_init(adapter, &dvb->net, &dvb->demux.dmx);
+ if (result < 0) {
+ pr_warn("%s: dvb_net_init failed (errno = %d)\n",
+ dvb->name, result);
+ goto fail_fe_conn;
+ }
+ return 0;
+
+fail_fe_conn:
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_mem);
+fail_fe_mem:
+ dvb->demux.dmx.remove_frontend(&dvb->demux.dmx, &dvb->fe_hw);
+fail_fe_hw:
+ dvb_dmxdev_release(&dvb->dmxdev);
+fail_dmxdev:
+ dvb_dmx_release(&dvb->demux);
+fail_dmx:
+ dvb_unregister_frontend(dvb->frontend);
+fail_frontend:
+ dvb_frontend_detach(dvb->frontend);
+ dvb->frontend = NULL;
+
+ return result;
+}
+
+/* ------------------------------------------------------------------ */
+/* Register a single adapter and one or more frontends */
+int vb2_dvb_register_bus(struct vb2_dvb_frontends *f,
+ struct module *module,
+ void *adapter_priv,
+ struct device *device,
+ struct media_device *mdev,
+ short *adapter_nr,
+ int mfe_shared)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe;
+ int res;
+
+ fe = vb2_dvb_get_frontend(f, 1);
+ if (!fe) {
+ pr_warn("Unable to register the adapter which has no frontends\n");
+ return -EINVAL;
+ }
+
+ /* Bring up the adapter */
+ res = vb2_dvb_register_adapter(f, module, adapter_priv, device, mdev,
+ fe->dvb.name, adapter_nr, mfe_shared);
+ if (res < 0) {
+ pr_warn("vb2_dvb_register_adapter failed (errno = %d)\n", res);
+ return res;
+ }
+
+ /* Attach all of the frontends to the adapter */
+ mutex_lock(&f->lock);
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ res = vb2_dvb_register_frontend(&f->adapter, &fe->dvb);
+ if (res < 0) {
+ pr_warn("%s: vb2_dvb_register_frontend failed (errno = %d)\n",
+ fe->dvb.name, res);
+ goto err;
+ }
+ res = dvb_create_media_graph(&f->adapter, false);
+ if (res < 0)
+ goto err;
+ }
+
+ mutex_unlock(&f->lock);
+ return 0;
+
+err:
+ mutex_unlock(&f->lock);
+ vb2_dvb_unregister_bus(f);
+ return res;
+}
+EXPORT_SYMBOL(vb2_dvb_register_bus);
+
+void vb2_dvb_unregister_bus(struct vb2_dvb_frontends *f)
+{
+ vb2_dvb_dealloc_frontends(f);
+
+ dvb_unregister_adapter(&f->adapter);
+}
+EXPORT_SYMBOL(vb2_dvb_unregister_bus);
+
+struct vb2_dvb_frontend *vb2_dvb_get_frontend(
+ struct vb2_dvb_frontends *f, int id)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe, *ret = NULL;
+
+ mutex_lock(&f->lock);
+
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ if (fe->id == id) {
+ ret = fe;
+ break;
+ }
+ }
+
+ mutex_unlock(&f->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vb2_dvb_get_frontend);
+
+int vb2_dvb_find_frontend(struct vb2_dvb_frontends *f,
+ struct dvb_frontend *p)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe = NULL;
+ int ret = 0;
+
+ mutex_lock(&f->lock);
+
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ if (fe->dvb.frontend == p) {
+ ret = fe->id;
+ break;
+ }
+ }
+
+ mutex_unlock(&f->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(vb2_dvb_find_frontend);
+
+struct vb2_dvb_frontend *vb2_dvb_alloc_frontend(
+ struct vb2_dvb_frontends *f, int id)
+{
+ struct vb2_dvb_frontend *fe;
+
+ fe = kzalloc(sizeof(struct vb2_dvb_frontend), GFP_KERNEL);
+ if (fe == NULL)
+ return NULL;
+
+ fe->id = id;
+ mutex_init(&fe->dvb.lock);
+
+ mutex_lock(&f->lock);
+ list_add_tail(&fe->felist, &f->felist);
+ mutex_unlock(&f->lock);
+ return fe;
+}
+EXPORT_SYMBOL(vb2_dvb_alloc_frontend);
+
+void vb2_dvb_dealloc_frontends(struct vb2_dvb_frontends *f)
+{
+ struct list_head *list, *q;
+ struct vb2_dvb_frontend *fe;
+
+ mutex_lock(&f->lock);
+ list_for_each_safe(list, q, &f->felist) {
+ fe = list_entry(list, struct vb2_dvb_frontend, felist);
+ if (fe->dvb.net.dvbdev) {
+ dvb_net_release(&fe->dvb.net);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_mem);
+ fe->dvb.demux.dmx.remove_frontend(&fe->dvb.demux.dmx,
+ &fe->dvb.fe_hw);
+ dvb_dmxdev_release(&fe->dvb.dmxdev);
+ dvb_dmx_release(&fe->dvb.demux);
+ dvb_unregister_frontend(fe->dvb.frontend);
+ }
+ if (fe->dvb.frontend)
+ /* always allocated, may have been reset */
+ dvb_frontend_detach(fe->dvb.frontend);
+ list_del(list); /* remove list entry */
+ kfree(fe); /* free frontend allocation */
+ }
+ mutex_unlock(&f->lock);
+}
+EXPORT_SYMBOL(vb2_dvb_dealloc_frontends);
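+
+/*
+ * A registration sketch (hedged; the "my_" names are hypothetical): a
+ * bridge driver allocates a frontend, attaches a demodulator so that
+ * fe->dvb.frontend is valid, sets up the fe->dvb.dvbq vb2 queue, and
+ * then registers the whole bus in one call.
+ */
+#if 0	/* illustrative only */
+static int my_dvb_init(struct my_dev *dev)
+{
+	struct vb2_dvb_frontend *fe;
+
+	fe = vb2_dvb_alloc_frontend(&dev->frontends, 1);
+	if (!fe)
+		return -ENOMEM;
+
+	/* ... attach demod/tuner and initialize fe->dvb.dvbq here ... */
+
+	return vb2_dvb_register_bus(&dev->frontends, THIS_MODULE, dev,
+				    &dev->pci->dev, NULL, my_adapter_nr, 0);
+}
+#endif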
diff --git a/drivers/media/common/videobuf2/videobuf2-memops.c b/drivers/media/common/videobuf2/videobuf2-memops.c
new file mode 100644
index 000000000..9dd6c2716
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-memops.c
@@ -0,0 +1,129 @@
+/*
+ * videobuf2-memops.c - generic memory handling routines for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-memops.h>
+
+/**
+ * vb2_create_framevec() - map virtual addresses to pfns
+ * @start: Virtual user address where we start mapping
+ * @length: Length of a range to map
+ *
+ * This function allocates and fills in a vector with pfns corresponding to
+ * the virtual address range passed in the arguments. If the pfns have
+ * corresponding pages, page references are also grabbed to pin the pages in
+ * memory. The function returns a pointer to the vector on success and an
+ * error pointer in case of failure. The returned vector needs to be freed
+ * via vb2_destroy_framevec().
+ */
+struct frame_vector *vb2_create_framevec(unsigned long start,
+ unsigned long length)
+{
+ int ret;
+ unsigned long first, last;
+ unsigned long nr;
+ struct frame_vector *vec;
+
+ first = start >> PAGE_SHIFT;
+ last = (start + length - 1) >> PAGE_SHIFT;
+ nr = last - first + 1;
+ vec = frame_vector_create(nr);
+ if (!vec)
+ return ERR_PTR(-ENOMEM);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, vec);
+ if (ret < 0)
+ goto out_destroy;
+ /* We accept only a complete set of PFNs */
+ if (ret != nr) {
+ ret = -EFAULT;
+ goto out_release;
+ }
+ return vec;
+out_release:
+ put_vaddr_frames(vec);
+out_destroy:
+ frame_vector_destroy(vec);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(vb2_create_framevec);
+
+/**
+ * vb2_destroy_framevec() - release vector of mapped pfns
+ * @vec: vector of pfns / pages to release
+ *
+ * This releases references to all pages in the vector @vec (if corresponding
+ * pfns are backed by pages) and frees the passed vector.
+ */
+void vb2_destroy_framevec(struct frame_vector *vec)
+{
+ put_vaddr_frames(vec);
+ frame_vector_destroy(vec);
+}
+EXPORT_SYMBOL(vb2_destroy_framevec);
+
+/**
+ * vb2_common_vm_open() - increase refcount of the vma
+ * @vma: virtual memory region for the mapping
+ *
+ * This function adds another user to the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_open(struct vm_area_struct *vma)
+{
+ struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+ pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ __func__, h, refcount_read(h->refcount), vma->vm_start,
+ vma->vm_end);
+
+ refcount_inc(h->refcount);
+}
+
+/**
+ * vb2_common_vm_close() - decrease refcount of the vma
+ * @vma: virtual memory region for the mapping
+ *
+ * This function releases the user from the provided vma. It expects
+ * struct vb2_vmarea_handler pointer in vma->vm_private_data.
+ */
+static void vb2_common_vm_close(struct vm_area_struct *vma)
+{
+ struct vb2_vmarea_handler *h = vma->vm_private_data;
+
+ pr_debug("%s: %p, refcount: %d, vma: %08lx-%08lx\n",
+ __func__, h, refcount_read(h->refcount), vma->vm_start,
+ vma->vm_end);
+
+ h->put(h->arg);
+}
+
+/*
+ * vb2_common_vm_ops - common vm_ops used for tracking refcount of mmapped
+ * video buffers
+ */
+const struct vm_operations_struct vb2_common_vm_ops = {
+ .open = vb2_common_vm_open,
+ .close = vb2_common_vm_close,
+};
+EXPORT_SYMBOL_GPL(vb2_common_vm_ops);
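+
+/*
+ * A usage sketch, mirroring the allocators in this directory: an mmap
+ * handler installs these ops together with a vb2_vmarea_handler so that
+ * the buffer refcount follows the lifetime of the mapping.
+ */
+#if 0	/* illustrative only; "my_buf_put" is a hypothetical release hook */
+	buf->handler.refcount = &buf->refcount;
+	buf->handler.put = my_buf_put;
+	buf->handler.arg = buf;
+
+	vma->vm_private_data = &buf->handler;
+	vma->vm_ops = &vb2_common_vm_ops;
+	vma->vm_ops->open(vma);		/* take the mapping's reference */
+#endif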
+
+MODULE_DESCRIPTION("common memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c
new file mode 100644
index 000000000..1f5d235a8
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c
@@ -0,0 +1,1331 @@
+/*
+ * videobuf2-v4l2.c - V4L2 driver helper framework
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ * Marek Szyprowski <m.szyprowski@samsung.com>
+ *
+ * The vb2_thread implementation was based on code from videobuf-dvb.c:
+ * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/freezer.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <media/v4l2-common.h>
+#include <media/v4l2-dev.h>
+#include <media/v4l2-device.h>
+#include <media/v4l2-event.h>
+#include <media/v4l2-fh.h>
+
+#include <media/videobuf2-v4l2.h>
+
+static int debug;
+module_param(debug, int, 0644);
+
+#define dprintk(q, level, fmt, arg...) \
+ do { \
+ if (debug >= level) \
+ pr_info("vb2-v4l2: [%p] %s: " fmt, \
+ (q)->name, __func__, ## arg); \
+ } while (0)
+
+/* Flags that are set by us */
+#define V4L2_BUFFER_MASK_FLAGS (V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
+ V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
+ V4L2_BUF_FLAG_PREPARED | \
+ V4L2_BUF_FLAG_IN_REQUEST | \
+ V4L2_BUF_FLAG_REQUEST_FD | \
+ V4L2_BUF_FLAG_TIMESTAMP_MASK)
+/* Output buffer flags that should be passed on to the driver */
+#define V4L2_BUFFER_OUT_FLAGS (V4L2_BUF_FLAG_PFRAME | \
+ V4L2_BUF_FLAG_BFRAME | \
+ V4L2_BUF_FLAG_KEYFRAME | \
+ V4L2_BUF_FLAG_TIMECODE | \
+ V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF)
+
+/*
+ * __verify_planes_array() - verify that the planes array passed in struct
+ * v4l2_buffer from userspace can be safely used
+ */
+static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
+ return 0;
+
+ /* Is memory for copying plane information present? */
+ if (b->m.planes == NULL) {
+ dprintk(vb->vb2_queue, 1,
+ "multi-planar buffer passed but planes array not provided\n");
+ return -EINVAL;
+ }
+
+ if (b->length < vb->num_planes || b->length > VB2_MAX_PLANES) {
+ dprintk(vb->vb2_queue, 1,
+ "incorrect planes array length, expected %d, got %d\n",
+ vb->num_planes, b->length);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+ return __verify_planes_array(vb, pb);
+}
+
+/*
+ * __verify_length() - Verify that the bytesused value for each plane fits in
+ * the plane length and that the data offset doesn't exceed the bytesused value.
+ */
+static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
+{
+ unsigned int length;
+ unsigned int bytesused;
+ unsigned int plane;
+
+ if (V4L2_TYPE_IS_CAPTURE(b->type))
+ return 0;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ length = (b->memory == VB2_MEMORY_USERPTR ||
+ b->memory == VB2_MEMORY_DMABUF)
+ ? b->m.planes[plane].length
+ : vb->planes[plane].length;
+ bytesused = b->m.planes[plane].bytesused
+ ? b->m.planes[plane].bytesused : length;
+
+ if (b->m.planes[plane].bytesused > length)
+ return -EINVAL;
+
+ if (b->m.planes[plane].data_offset > 0 &&
+ b->m.planes[plane].data_offset >= bytesused)
+ return -EINVAL;
+ }
+ } else {
+ length = (b->memory == VB2_MEMORY_USERPTR)
+ ? b->length : vb->planes[0].length;
+
+ if (b->bytesused > length)
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * __init_vb2_v4l2_buffer() - initialize the vb2_v4l2_buffer struct
+ */
+static void __init_vb2_v4l2_buffer(struct vb2_buffer *vb)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+
+ vbuf->request_fd = -1;
+}
+
+static void __copy_timestamp(struct vb2_buffer *vb, const void *pb)
+{
+ const struct v4l2_buffer *b = pb;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *q = vb->vb2_queue;
+
+ if (q->is_output) {
+ /*
+ * For output buffers copy the timestamp if needed,
+ * and the timecode field and flag if needed.
+ */
+ if (q->copy_timestamp)
+ vb->timestamp = v4l2_buffer_get_timestamp(b);
+ vbuf->flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
+ if (b->flags & V4L2_BUF_FLAG_TIMECODE)
+ vbuf->timecode = b->timecode;
+ }
+}
+
+static void vb2_warn_zero_bytesused(struct vb2_buffer *vb)
+{
+ static bool check_once;
+
+ if (check_once)
+ return;
+
+ check_once = true;
+
+ pr_warn("use of bytesused == 0 is deprecated and will be removed in the future,\n");
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pr_warn("use VIDIOC_DECODER_CMD(V4L2_DEC_CMD_STOP) instead.\n");
+ else
+ pr_warn("use the actual size instead.\n");
+}
+
+static int vb2_fill_vb2_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
+{
+ struct vb2_queue *q = vb->vb2_queue;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_plane *planes = vbuf->planes;
+ unsigned int plane;
+ int ret;
+
+ ret = __verify_length(vb, b);
+ if (ret < 0) {
+ dprintk(q, 1, "plane parameters verification failed: %d\n", ret);
+ return ret;
+ }
+ if (b->field == V4L2_FIELD_ALTERNATE && q->is_output) {
+ /*
+ * If the format's field is ALTERNATE, then the buffer's field
+ * should be either TOP or BOTTOM, not ALTERNATE since that
+ * makes no sense. The driver has to know whether the
+ * buffer represents a top or a bottom field in order to
+ * program any DMA correctly. Using ALTERNATE is wrong, since
+ * that just says that it is either a top or a bottom field,
+ * but not which of the two it is.
+ */
+ dprintk(q, 1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
+ return -EINVAL;
+ }
+ vbuf->sequence = 0;
+ vbuf->request_fd = -1;
+ vbuf->is_held = false;
+
+ if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
+ switch (b->memory) {
+ case VB2_MEMORY_USERPTR:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.userptr =
+ b->m.planes[plane].m.userptr;
+ planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ break;
+ case VB2_MEMORY_DMABUF:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.fd =
+ b->m.planes[plane].m.fd;
+ planes[plane].length =
+ b->m.planes[plane].length;
+ }
+ break;
+ default:
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ planes[plane].m.offset =
+ vb->planes[plane].m.offset;
+ planes[plane].length =
+ vb->planes[plane].length;
+ }
+ break;
+ }
+
+ /* Fill in driver-provided information for OUTPUT types */
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * Will have to go up to b->length when the API starts
+ * accepting a variable number of planes.
+ *
+ * If bytesused == 0 for the output buffer, then fall
+ * back to the full buffer size. In that case
+ * userspace clearly never bothered to set it and
+ * it's a safe assumption that they really meant to
+ * use the full plane sizes.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0
+ * as a way to indicate that streaming is finished.
+ * In that case, the driver should use the
+ * allow_zero_bytesused flag to keep old userspace
+ * applications working.
+ */
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct vb2_plane *pdst = &planes[plane];
+ struct v4l2_plane *psrc = &b->m.planes[plane];
+
+ if (psrc->bytesused == 0)
+ vb2_warn_zero_bytesused(vb);
+
+ if (vb->vb2_queue->allow_zero_bytesused)
+ pdst->bytesused = psrc->bytesused;
+ else
+ pdst->bytesused = psrc->bytesused ?
+ psrc->bytesused : pdst->length;
+ pdst->data_offset = psrc->data_offset;
+ }
+ }
+ } else {
+ /*
+ * Single-planar buffers do not use the planes array, so fill
+ * in the relevant v4l2_buffer struct fields instead. In vb2
+ * we use our internal vb2_plane struct for single-planar
+ * buffers as well, for simplicity.
+ *
+ * If bytesused == 0 for the output buffer, then fall back
+ * to the full buffer size as that's a sensible default.
+ *
+ * Some drivers, e.g. old codec drivers, use bytesused == 0 as
+ * a way to indicate that streaming is finished. In that case,
+ * the driver should use the allow_zero_bytesused flag to keep
+ * old userspace applications working.
+ */
+ switch (b->memory) {
+ case VB2_MEMORY_USERPTR:
+ planes[0].m.userptr = b->m.userptr;
+ planes[0].length = b->length;
+ break;
+ case VB2_MEMORY_DMABUF:
+ planes[0].m.fd = b->m.fd;
+ planes[0].length = b->length;
+ break;
+ default:
+ planes[0].m.offset = vb->planes[0].m.offset;
+ planes[0].length = vb->planes[0].length;
+ break;
+ }
+
+ planes[0].data_offset = 0;
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ if (b->bytesused == 0)
+ vb2_warn_zero_bytesused(vb);
+
+ if (vb->vb2_queue->allow_zero_bytesused)
+ planes[0].bytesused = b->bytesused;
+ else
+ planes[0].bytesused = b->bytesused ?
+ b->bytesused : planes[0].length;
+ } else
+ planes[0].bytesused = 0;
+
+ }
+
+ /* Zero flags that we handle */
+ vbuf->flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
+ if (!vb->vb2_queue->copy_timestamp || V4L2_TYPE_IS_CAPTURE(b->type)) {
+ /*
+ * Non-COPY timestamps and non-OUTPUT queues will get
+ * their timestamp and timestamp source flags from the
+ * queue.
+ */
+ vbuf->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ if (V4L2_TYPE_IS_OUTPUT(b->type)) {
+ /*
+ * For output buffers mask out the timecode flag:
+ * this will be handled later in vb2_qbuf().
+ * The 'field' is valid metadata for this output buffer
+ * and so that needs to be copied here.
+ */
+ vbuf->flags &= ~V4L2_BUF_FLAG_TIMECODE;
+ vbuf->field = b->field;
+ if (!(q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF))
+ vbuf->flags &= ~V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF;
+ } else {
+ /* Zero any output buffer flags as this is a capture buffer */
+ vbuf->flags &= ~V4L2_BUFFER_OUT_FLAGS;
+ /* Zero last flag, this is a signal from driver to userspace */
+ vbuf->flags &= ~V4L2_BUF_FLAG_LAST;
+ }
+
+ return 0;
+}
+
+static void set_buffer_cache_hints(struct vb2_queue *q,
+ struct vb2_buffer *vb,
+ struct v4l2_buffer *b)
+{
+ if (!vb2_queue_allows_cache_hints(q)) {
+ /*
+ * Clear buffer cache flags if queue does not support user
+ * space hints. That's to indicate to userspace that these
+ * flags won't work.
+ */
+ b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;
+ b->flags &= ~V4L2_BUF_FLAG_NO_CACHE_CLEAN;
+ return;
+ }
+
+ if (b->flags & V4L2_BUF_FLAG_NO_CACHE_INVALIDATE)
+ vb->skip_cache_sync_on_finish = 1;
+
+ if (b->flags & V4L2_BUF_FLAG_NO_CACHE_CLEAN)
+ vb->skip_cache_sync_on_prepare = 1;
+}
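+
+/*
+ * Userspace counterpart (a hedged sketch): the hints arrive as buffer
+ * flags on VIDIOC_QBUF/VIDIOC_PREPARE_BUF and only take effect when the
+ * queue reports V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS:
+ *
+ *	struct v4l2_buffer b = {};
+ *	b.flags |= V4L2_BUF_FLAG_NO_CACHE_INVALIDATE;	(skip sync on finish)
+ *	b.flags |= V4L2_BUF_FLAG_NO_CACHE_CLEAN;	(skip sync on prepare)
+ *	ioctl(fd, VIDIOC_QBUF, &b);
+ */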
+
+static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b, bool is_prepare,
+ struct media_request **p_req)
+{
+ const char *opname = is_prepare ? "prepare_buf" : "qbuf";
+ struct media_request *req;
+ struct vb2_v4l2_buffer *vbuf;
+ struct vb2_buffer *vb;
+ int ret;
+
+ if (b->type != q->type) {
+ dprintk(q, 1, "%s: invalid buffer type\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(q, 1, "%s: buffer index out of range\n", opname);
+ return -EINVAL;
+ }
+
+ if (q->bufs[b->index] == NULL) {
+ /* Should never happen */
+ dprintk(q, 1, "%s: buffer is NULL\n", opname);
+ return -EINVAL;
+ }
+
+ if (b->memory != q->memory) {
+ dprintk(q, 1, "%s: invalid memory type\n", opname);
+ return -EINVAL;
+ }
+
+ vb = q->bufs[b->index];
+ vbuf = to_vb2_v4l2_buffer(vb);
+ ret = __verify_planes_array(vb, b);
+ if (ret)
+ return ret;
+
+ if (!is_prepare && (b->flags & V4L2_BUF_FLAG_REQUEST_FD) &&
+ vb->state != VB2_BUF_STATE_DEQUEUED) {
+ dprintk(q, 1, "%s: buffer is not in dequeued state\n", opname);
+ return -EINVAL;
+ }
+
+ if (!vb->prepared) {
+ set_buffer_cache_hints(q, vb, b);
+ /* Copy relevant information provided by the userspace */
+ memset(vbuf->planes, 0,
+ sizeof(vbuf->planes[0]) * vb->num_planes);
+ ret = vb2_fill_vb2_v4l2_buffer(vb, b);
+ if (ret)
+ return ret;
+ }
+
+ if (is_prepare)
+ return 0;
+
+ if (!(b->flags & V4L2_BUF_FLAG_REQUEST_FD)) {
+ if (q->requires_requests) {
+ dprintk(q, 1, "%s: queue requires requests\n", opname);
+ return -EBADR;
+ }
+ if (q->uses_requests) {
+ dprintk(q, 1, "%s: queue uses requests\n", opname);
+ return -EBUSY;
+ }
+ return 0;
+ } else if (!q->supports_requests) {
+ dprintk(q, 1, "%s: queue does not support requests\n", opname);
+ return -EBADR;
+ } else if (q->uses_qbuf) {
+ dprintk(q, 1, "%s: queue does not use requests\n", opname);
+ return -EBUSY;
+ }
+
+ /*
+ * For proper locking when queueing a request you need to be able
+ * to lock access to the vb2 queue, so check that there is a lock
+ * that we can use. In addition p_req must be non-NULL.
+ */
+ if (WARN_ON(!q->lock || !p_req))
+ return -EINVAL;
+
+ /*
+ * Make sure this op is implemented by the driver. It's easy to forget
+ * this callback, but it is important when canceling a buffer in a
+ * queued request.
+ */
+ if (WARN_ON(!q->ops->buf_request_complete))
+ return -EINVAL;
+ /*
+ * Make sure this op is implemented by the driver for the output queue.
+ * It's easy to forget this callback, but it is important to correctly
+ * validate the 'field' value at QBUF time.
+ */
+ if (WARN_ON((q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT ||
+ q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) &&
+ !q->ops->buf_out_validate))
+ return -EINVAL;
+
+ req = media_request_get_by_fd(mdev, b->request_fd);
+ if (IS_ERR(req)) {
+ dprintk(q, 1, "%s: invalid request_fd\n", opname);
+ return PTR_ERR(req);
+ }
+
+ /*
+ * Early sanity check. This is checked again when the buffer
+ * is bound to the request in vb2_core_qbuf().
+ */
+ if (req->state != MEDIA_REQUEST_STATE_IDLE &&
+ req->state != MEDIA_REQUEST_STATE_UPDATING) {
+ dprintk(q, 1, "%s: request is not idle\n", opname);
+ media_request_put(req);
+ return -EBUSY;
+ }
+
+ *p_req = req;
+ vbuf->request_fd = b->request_fd;
+
+ return 0;
+}
+
+/*
+ * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
+ * returned to userspace
+ */
+static void __fill_v4l2_buffer(struct vb2_buffer *vb, void *pb)
+{
+ struct v4l2_buffer *b = pb;
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ struct vb2_queue *q = vb->vb2_queue;
+ unsigned int plane;
+
+ /* Copy back data such as timestamp, flags, etc. */
+ b->index = vb->index;
+ b->type = vb->type;
+ b->memory = vb->memory;
+ b->bytesused = 0;
+
+ b->flags = vbuf->flags;
+ b->field = vbuf->field;
+ v4l2_buffer_set_timestamp(b, vb->timestamp);
+ b->timecode = vbuf->timecode;
+ b->sequence = vbuf->sequence;
+ b->reserved2 = 0;
+ b->request_fd = 0;
+
+ if (q->is_multiplanar) {
+ /*
+ * Fill in plane-related data if userspace provided an array
+ * for it. The caller has already verified memory and size.
+ */
+ b->length = vb->num_planes;
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ struct v4l2_plane *pdst = &b->m.planes[plane];
+ struct vb2_plane *psrc = &vb->planes[plane];
+
+ pdst->bytesused = psrc->bytesused;
+ pdst->length = psrc->length;
+ if (q->memory == VB2_MEMORY_MMAP)
+ pdst->m.mem_offset = psrc->m.offset;
+ else if (q->memory == VB2_MEMORY_USERPTR)
+ pdst->m.userptr = psrc->m.userptr;
+ else if (q->memory == VB2_MEMORY_DMABUF)
+ pdst->m.fd = psrc->m.fd;
+ pdst->data_offset = psrc->data_offset;
+ memset(pdst->reserved, 0, sizeof(pdst->reserved));
+ }
+ } else {
+ /*
+ * We use length and offset in the vb2_plane array even for
+ * single-planar buffers, but userspace does not.
+ */
+ b->length = vb->planes[0].length;
+ b->bytesused = vb->planes[0].bytesused;
+ if (q->memory == VB2_MEMORY_MMAP)
+ b->m.offset = vb->planes[0].m.offset;
+ else if (q->memory == VB2_MEMORY_USERPTR)
+ b->m.userptr = vb->planes[0].m.userptr;
+ else if (q->memory == VB2_MEMORY_DMABUF)
+ b->m.fd = vb->planes[0].m.fd;
+ }
+
+ /*
+ * Clear any buffer state related flags.
+ */
+ b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
+ if (!q->copy_timestamp) {
+ /*
+ * For non-COPY timestamps, drop timestamp source bits
+ * and obtain the timestamp source from the queue.
+ */
+ b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
+ }
+
+ switch (vb->state) {
+ case VB2_BUF_STATE_QUEUED:
+ case VB2_BUF_STATE_ACTIVE:
+ b->flags |= V4L2_BUF_FLAG_QUEUED;
+ break;
+ case VB2_BUF_STATE_IN_REQUEST:
+ b->flags |= V4L2_BUF_FLAG_IN_REQUEST;
+ break;
+ case VB2_BUF_STATE_ERROR:
+ b->flags |= V4L2_BUF_FLAG_ERROR;
+ fallthrough;
+ case VB2_BUF_STATE_DONE:
+ b->flags |= V4L2_BUF_FLAG_DONE;
+ break;
+ case VB2_BUF_STATE_PREPARING:
+ case VB2_BUF_STATE_DEQUEUED:
+ /* nothing */
+ break;
+ }
+
+ if ((vb->state == VB2_BUF_STATE_DEQUEUED ||
+ vb->state == VB2_BUF_STATE_IN_REQUEST) &&
+ vb->synced && vb->prepared)
+ b->flags |= V4L2_BUF_FLAG_PREPARED;
+
+ if (vb2_buffer_in_use(q, vb))
+ b->flags |= V4L2_BUF_FLAG_MAPPED;
+ if (vbuf->request_fd >= 0) {
+ b->flags |= V4L2_BUF_FLAG_REQUEST_FD;
+ b->request_fd = vbuf->request_fd;
+ }
+}
+
+/*
+ * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
+ * v4l2_buffer by the userspace. It also verifies that struct
+ * v4l2_buffer has a valid number of planes.
+ */
+static int __fill_vb2_buffer(struct vb2_buffer *vb, struct vb2_plane *planes)
+{
+ struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
+ unsigned int plane;
+
+ if (!vb->vb2_queue->copy_timestamp)
+ vb->timestamp = 0;
+
+ for (plane = 0; plane < vb->num_planes; ++plane) {
+ if (vb->vb2_queue->memory != VB2_MEMORY_MMAP) {
+ planes[plane].m = vbuf->planes[plane].m;
+ planes[plane].length = vbuf->planes[plane].length;
+ }
+ planes[plane].bytesused = vbuf->planes[plane].bytesused;
+ planes[plane].data_offset = vbuf->planes[plane].data_offset;
+ }
+ return 0;
+}
+
+static const struct vb2_buf_ops v4l2_buf_ops = {
+ .verify_planes_array = __verify_planes_array_core,
+ .init_buffer = __init_vb2_v4l2_buffer,
+ .fill_user_buffer = __fill_v4l2_buffer,
+ .fill_vb2_buffer = __fill_vb2_buffer,
+ .copy_timestamp = __copy_timestamp,
+};
+
+struct vb2_buffer *vb2_find_buffer(struct vb2_queue *q, u64 timestamp)
+{
+ unsigned int i;
+
+ for (i = 0; i < q->num_buffers; i++)
+ if (q->bufs[i]->copied_timestamp &&
+ q->bufs[i]->timestamp == timestamp)
+ return vb2_get_buffer(q, i);
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(vb2_find_buffer);
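+
+/*
+ * A lookup sketch (hedged; the names are hypothetical): mem2mem codec
+ * drivers use this to locate the buffer whose COPY timestamp matches a
+ * reference timestamp supplied by userspace.
+ */
+#if 0	/* illustrative only */
+	struct vb2_buffer *ref;
+
+	ref = vb2_find_buffer(cap_q, ref_timestamp);
+	if (!ref)
+		return -EINVAL;	/* no buffer carries that timestamp */
+#endif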
+
+/*
+ * vb2_querybuf() - query video buffer information
+ * @q: vb2 queue
+ * @b: buffer struct passed from userspace to the vidioc_querybuf handler
+ * in the driver
+ *
+ * Should be called from the vidioc_querybuf ioctl handler in the driver.
+ * This function will verify the passed v4l2_buffer structure and fill in
+ * the relevant information for userspace.
+ *
+ * The return values from this function are intended to be directly returned
+ * from the vidioc_querybuf handler in the driver.
+ */
+int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
+{
+ struct vb2_buffer *vb;
+ int ret;
+
+ if (b->type != q->type) {
+ dprintk(q, 1, "wrong buffer type\n");
+ return -EINVAL;
+ }
+
+ if (b->index >= q->num_buffers) {
+ dprintk(q, 1, "buffer index out of range\n");
+ return -EINVAL;
+ }
+ vb = q->bufs[b->index];
+ ret = __verify_planes_array(vb, b);
+ if (!ret)
+ vb2_core_querybuf(q, b->index, b);
+ return ret;
+}
+EXPORT_SYMBOL(vb2_querybuf);
+
+static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
+{
+ *caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
+ if (q->io_modes & VB2_MMAP)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
+ if (q->io_modes & VB2_USERPTR)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_USERPTR;
+ if (q->io_modes & VB2_DMABUF)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_DMABUF;
+ if (q->subsystem_flags & VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF;
+ if (q->allow_cache_hints && q->io_modes & VB2_MMAP)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
+#ifdef CONFIG_MEDIA_CONTROLLER_REQUEST_API
+ if (q->supports_requests)
+ *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
+#endif
+}
+
+static void validate_memory_flags(struct vb2_queue *q,
+ int memory,
+ u32 *flags)
+{
+ if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
+ /*
+ * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
+ * but in order to avoid bugs we zero out all bits.
+ */
+ *flags = 0;
+ } else {
+ /* Clear all unknown flags. */
+ *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+ }
+}
+
+int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
+{
+ int ret = vb2_verify_memory_type(q, req->memory, req->type);
+ u32 flags = req->flags;
+
+ fill_buf_caps(q, &req->capabilities);
+ validate_memory_flags(q, req->memory, &flags);
+ req->flags = flags;
+ return ret ? ret : vb2_core_reqbufs(q, req->memory,
+ req->flags, &req->count);
+}
+EXPORT_SYMBOL_GPL(vb2_reqbufs);
+
+int vb2_prepare_buf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b)
+{
+ int ret;
+
+ if (vb2_fileio_is_active(q)) {
+ dprintk(q, 1, "file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (b->flags & V4L2_BUF_FLAG_REQUEST_FD)
+ return -EINVAL;
+
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, true, NULL);
+
+ return ret ? ret : vb2_core_prepare_buf(q, b->index, b);
+}
+EXPORT_SYMBOL_GPL(vb2_prepare_buf);
+
+int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
+{
+ unsigned requested_planes = 1;
+ unsigned requested_sizes[VIDEO_MAX_PLANES];
+ struct v4l2_format *f = &create->format;
+ int ret = vb2_verify_memory_type(q, create->memory, f->type);
+ unsigned i;
+
+ fill_buf_caps(q, &create->capabilities);
+ validate_memory_flags(q, create->memory, &create->flags);
+ create->index = q->num_buffers;
+ if (create->count == 0)
+ return ret != -EBUSY ? ret : 0;
+
+ switch (f->type) {
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE:
+ requested_planes = f->fmt.pix_mp.num_planes;
+ if (requested_planes == 0 ||
+ requested_planes > VIDEO_MAX_PLANES)
+ return -EINVAL;
+ for (i = 0; i < requested_planes; i++)
+ requested_sizes[i] =
+ f->fmt.pix_mp.plane_fmt[i].sizeimage;
+ break;
+ case V4L2_BUF_TYPE_VIDEO_CAPTURE:
+ case V4L2_BUF_TYPE_VIDEO_OUTPUT:
+ requested_sizes[0] = f->fmt.pix.sizeimage;
+ break;
+ case V4L2_BUF_TYPE_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_VBI_OUTPUT:
+ requested_sizes[0] = f->fmt.vbi.samples_per_line *
+ (f->fmt.vbi.count[0] + f->fmt.vbi.count[1]);
+ break;
+ case V4L2_BUF_TYPE_SLICED_VBI_CAPTURE:
+ case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
+ requested_sizes[0] = f->fmt.sliced.io_size;
+ break;
+ case V4L2_BUF_TYPE_SDR_CAPTURE:
+ case V4L2_BUF_TYPE_SDR_OUTPUT:
+ requested_sizes[0] = f->fmt.sdr.buffersize;
+ break;
+ case V4L2_BUF_TYPE_META_CAPTURE:
+ case V4L2_BUF_TYPE_META_OUTPUT:
+ requested_sizes[0] = f->fmt.meta.buffersize;
+ break;
+ default:
+ return -EINVAL;
+ }
+ for (i = 0; i < requested_planes; i++)
+ if (requested_sizes[i] == 0)
+ return -EINVAL;
+ return ret ? ret : vb2_core_create_bufs(q, create->memory,
+ create->flags,
+ &create->count,
+ requested_planes,
+ requested_sizes);
+}
+EXPORT_SYMBOL_GPL(vb2_create_bufs);
+
+int vb2_qbuf(struct vb2_queue *q, struct media_device *mdev,
+ struct v4l2_buffer *b)
+{
+ struct media_request *req = NULL;
+ int ret;
+
+ if (vb2_fileio_is_active(q)) {
+ dprintk(q, 1, "file io in progress\n");
+ return -EBUSY;
+ }
+
+ ret = vb2_queue_or_prepare_buf(q, mdev, b, false, &req);
+ if (ret)
+ return ret;
+ ret = vb2_core_qbuf(q, b->index, b, req);
+ if (req)
+ media_request_put(req);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_qbuf);
+
+int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
+{
+ int ret;
+
+ if (vb2_fileio_is_active(q)) {
+ dprintk(q, 1, "file io in progress\n");
+ return -EBUSY;
+ }
+
+ if (b->type != q->type) {
+ dprintk(q, 1, "invalid buffer type\n");
+ return -EINVAL;
+ }
+
+ ret = vb2_core_dqbuf(q, NULL, b, nonblocking);
+
+ if (!q->is_output &&
+ b->flags & V4L2_BUF_FLAG_DONE &&
+ b->flags & V4L2_BUF_FLAG_LAST)
+ q->last_buffer_dequeued = true;
+
+ /*
+ * After calling the VIDIOC_DQBUF V4L2_BUF_FLAG_DONE must be
+ * cleared.
+ */
+ b->flags &= ~V4L2_BUF_FLAG_DONE;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vb2_dqbuf);
+
+int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(q, 1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_core_streamon(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamon);
+
+int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
+{
+ if (vb2_fileio_is_active(q)) {
+ dprintk(q, 1, "file io in progress\n");
+ return -EBUSY;
+ }
+ return vb2_core_streamoff(q, type);
+}
+EXPORT_SYMBOL_GPL(vb2_streamoff);
+
+int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
+{
+ return vb2_core_expbuf(q, &eb->fd, eb->type, eb->index,
+ eb->plane, eb->flags);
+}
+EXPORT_SYMBOL_GPL(vb2_expbuf);
+
+int vb2_queue_init_name(struct vb2_queue *q, const char *name)
+{
+ /*
+ * Sanity check
+ */
+ if (WARN_ON(!q) ||
+ WARN_ON(q->timestamp_flags &
+ ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
+ V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
+ return -EINVAL;
+
+ /* Warn that the driver should choose an appropriate timestamp type */
+ WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
+ V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
+
+ /* Warn that vb2_memory should match with v4l2_memory */
+ if (WARN_ON(VB2_MEMORY_MMAP != (int)V4L2_MEMORY_MMAP)
+ || WARN_ON(VB2_MEMORY_USERPTR != (int)V4L2_MEMORY_USERPTR)
+ || WARN_ON(VB2_MEMORY_DMABUF != (int)V4L2_MEMORY_DMABUF))
+ return -EINVAL;
+
+ if (q->buf_struct_size == 0)
+ q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
+
+ q->buf_ops = &v4l2_buf_ops;
+ q->is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
+ q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
+ q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
+ == V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ /*
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
+ * return EPOLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ */
+ q->quirk_poll_must_check_waiting_for_buffers = true;
+
+ if (name)
+ strscpy(q->name, name, sizeof(q->name));
+ else
+ q->name[0] = '\0';
+
+ return vb2_core_queue_init(q);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_init_name);
+
+int vb2_queue_init(struct vb2_queue *q)
+{
+ return vb2_queue_init_name(q, NULL);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_init);
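+
+/*
+ * An initialization sketch (hedged; the "my_" names are hypothetical):
+ * the driver fills in the queue and then calls vb2_queue_init(), or
+ * vb2_queue_init_name() to tag the queue in debug output.
+ */
+#if 0	/* illustrative only */
+static int my_setup_queue(struct my_dev *dev)
+{
+	struct vb2_queue *q = &dev->queue;
+
+	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+	q->io_modes = VB2_MMAP | VB2_DMABUF;
+	q->dev = dev->dev;
+	q->drv_priv = dev;
+	q->ops = &my_vb2_ops;
+	q->mem_ops = &vb2_vmalloc_memops;	/* or dma-contig / dma-sg */
+	q->buf_struct_size = sizeof(struct vb2_v4l2_buffer);
+	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
+	q->lock = &dev->lock;
+
+	return vb2_queue_init_name(q, "my-capture");
+}
+#endif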
+
+void vb2_queue_release(struct vb2_queue *q)
+{
+ vb2_core_queue_release(q);
+}
+EXPORT_SYMBOL_GPL(vb2_queue_release);
+
+int vb2_queue_change_type(struct vb2_queue *q, unsigned int type)
+{
+ if (type == q->type)
+ return 0;
+
+ if (vb2_is_busy(q))
+ return -EBUSY;
+
+ q->type = type;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_queue_change_type);
+
+__poll_t vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
+{
+ struct video_device *vfd = video_devdata(file);
+ __poll_t res;
+
+ res = vb2_core_poll(q, file, wait);
+
+ if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
+ struct v4l2_fh *fh = file->private_data;
+
+ poll_wait(file, &fh->wait, wait);
+ if (v4l2_event_pending(fh))
+ res |= EPOLLPRI;
+ }
+
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_poll);
+
+/*
+ * The following functions are not part of the vb2 core API, but are helper
+ * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
+ * and struct vb2_ops.
+ * They contain boilerplate code that most, if not all, drivers would
+ * otherwise have to duplicate, and so they simplify the driver code.
+ */
+
+/* vb2 ioctl helpers */
+
+int vb2_ioctl_reqbufs(struct file *file, void *priv,
+ struct v4l2_requestbuffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
+ u32 flags = p->flags;
+
+ fill_buf_caps(vdev->queue, &p->capabilities);
+ validate_memory_flags(vdev->queue, p->memory, &flags);
+ p->flags = flags;
+ if (res)
+ return res;
+ if (vb2_queue_is_busy(vdev->queue, file))
+ return -EBUSY;
+ res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count);
+ /*
+ * If count == 0, then the owner has released all buffers and is
+ * no longer the owner of the queue. Otherwise we have a new owner.
+ */
+ if (res == 0)
+ vdev->queue->owner = p->count ? file->private_data : NULL;
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
+
+int vb2_ioctl_create_bufs(struct file *file, void *priv,
+ struct v4l2_create_buffers *p)
+{
+ struct video_device *vdev = video_devdata(file);
+ int res = vb2_verify_memory_type(vdev->queue, p->memory,
+ p->format.type);
+
+ p->index = vdev->queue->num_buffers;
+ fill_buf_caps(vdev->queue, &p->capabilities);
+ validate_memory_flags(vdev->queue, p->memory, &p->flags);
+ /*
+ * If count == 0, then just check if memory and type are valid.
+ * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
+ */
+ if (p->count == 0)
+ return res != -EBUSY ? res : 0;
+ if (res)
+ return res;
+ if (vb2_queue_is_busy(vdev->queue, file))
+ return -EBUSY;
+
+ res = vb2_create_bufs(vdev->queue, p);
+ if (res == 0)
+ vdev->queue->owner = file->private_data;
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
+
+int vb2_ioctl_prepare_buf(struct file *file, void *priv,
+ struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev->queue, file))
+ return -EBUSY;
+ return vb2_prepare_buf(vdev->queue, vdev->v4l2_dev->mdev, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
+
+int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ /* No need to call vb2_queue_is_busy(), anyone can query buffers. */
+ return vb2_querybuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
+
+int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev->queue, file))
+ return -EBUSY;
+ return vb2_qbuf(vdev->queue, vdev->v4l2_dev->mdev, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
+
+int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev->queue, file))
+ return -EBUSY;
+ return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
+
+int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev->queue, file))
+ return -EBUSY;
+ return vb2_streamon(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
+
+int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev->queue, file))
+ return -EBUSY;
+ return vb2_streamoff(vdev->queue, i);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
+
+int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (vb2_queue_is_busy(vdev->queue, file))
+ return -EBUSY;
+ return vb2_expbuf(vdev->queue, p);
+}
+EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
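+
+/*
+ * A minimal sketch, not part of this patch, of how a driver typically
+ * plugs the helpers above into its ioctl table; the my_* handlers are
+ * hypothetical placeholders:
+ *
+ *   static const struct v4l2_ioctl_ops my_ioctl_ops = {
+ *           .vidioc_querycap    = my_querycap,
+ *           .vidioc_reqbufs     = vb2_ioctl_reqbufs,
+ *           .vidioc_create_bufs = vb2_ioctl_create_bufs,
+ *           .vidioc_prepare_buf = vb2_ioctl_prepare_buf,
+ *           .vidioc_querybuf    = vb2_ioctl_querybuf,
+ *           .vidioc_qbuf        = vb2_ioctl_qbuf,
+ *           .vidioc_dqbuf       = vb2_ioctl_dqbuf,
+ *           .vidioc_expbuf      = vb2_ioctl_expbuf,
+ *           .vidioc_streamon    = vb2_ioctl_streamon,
+ *           .vidioc_streamoff   = vb2_ioctl_streamoff,
+ *   };
+ */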
+
+/* v4l2_file_operations helpers */
+
+int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ return vb2_mmap(vdev->queue, vma);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_mmap);
+
+int _vb2_fop_release(struct file *file, struct mutex *lock)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ if (lock)
+ mutex_lock(lock);
+ if (file->private_data == vdev->queue->owner) {
+ vb2_queue_release(vdev->queue);
+ vdev->queue->owner = NULL;
+ }
+ if (lock)
+ mutex_unlock(lock);
+ return v4l2_fh_release(file);
+}
+EXPORT_SYMBOL_GPL(_vb2_fop_release);
+
+int vb2_fop_release(struct file *file)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+
+ return _vb2_fop_release(file, lock);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_release);
+
+ssize_t vb2_fop_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int err = -EBUSY;
+
+ if (!(vdev->queue->io_modes & VB2_WRITE))
+ return -EINVAL;
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ if (vb2_queue_is_busy(vdev->queue, file))
+ goto exit;
+ err = vb2_write(vdev->queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (vdev->queue->fileio)
+ vdev->queue->owner = file->private_data;
+exit:
+ if (lock)
+ mutex_unlock(lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_write);
+
+ssize_t vb2_fop_read(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
+ int err = -EBUSY;
+
+ if (!(vdev->queue->io_modes & VB2_READ))
+ return -EINVAL;
+ if (lock && mutex_lock_interruptible(lock))
+ return -ERESTARTSYS;
+ if (vb2_queue_is_busy(vdev->queue, file))
+ goto exit;
+ err = vb2_read(vdev->queue, buf, count, ppos,
+ file->f_flags & O_NONBLOCK);
+ if (vdev->queue->fileio)
+ vdev->queue->owner = file->private_data;
+exit:
+ if (lock)
+ mutex_unlock(lock);
+ return err;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_read);
+
+__poll_t vb2_fop_poll(struct file *file, poll_table *wait)
+{
+ struct video_device *vdev = video_devdata(file);
+ struct vb2_queue *q = vdev->queue;
+ struct mutex *lock = q->lock ? q->lock : vdev->lock;
+ __poll_t res;
+ void *fileio;
+
+	/*
+	 * If this helper doesn't know how to lock, then you shouldn't be
+	 * using it: write your own poll handler instead.
+	 */
+ WARN_ON(!lock);
+
+ if (lock && mutex_lock_interruptible(lock))
+ return EPOLLERR;
+
+ fileio = q->fileio;
+
+ res = vb2_poll(vdev->queue, file, wait);
+
+ /* If fileio was started, then we have a new queue owner. */
+ if (!fileio && q->fileio)
+ q->owner = file->private_data;
+ if (lock)
+ mutex_unlock(lock);
+ return res;
+}
+EXPORT_SYMBOL_GPL(vb2_fop_poll);
+
+#ifndef CONFIG_MMU
+unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct video_device *vdev = video_devdata(file);
+
+ return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
+}
+EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
+#endif
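+
+/*
+ * A minimal sketch, not part of this patch, of a v4l2_file_operations
+ * table built from the helpers above:
+ *
+ *   static const struct v4l2_file_operations my_fops = {
+ *           .owner          = THIS_MODULE,
+ *           .open           = v4l2_fh_open,
+ *           .release        = vb2_fop_release,
+ *           .read           = vb2_fop_read,
+ *           .poll           = vb2_fop_poll,
+ *           .unlocked_ioctl = video_ioctl2,
+ *           .mmap           = vb2_fop_mmap,
+ *   };
+ */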
+
+void vb2_video_unregister_device(struct video_device *vdev)
+{
+ /* Check if vdev was ever registered at all */
+ if (!vdev || !video_is_registered(vdev))
+ return;
+
+ /*
+ * Calling this function only makes sense if vdev->queue is set.
+ * If it is NULL, then just call video_unregister_device() instead.
+ */
+ WARN_ON(!vdev->queue);
+
+ /*
+ * Take a reference to the device since video_unregister_device()
+ * calls device_unregister(), but we don't want that to release
+ * the device since we want to clean up the queue first.
+ */
+ get_device(&vdev->dev);
+ video_unregister_device(vdev);
+ if (vdev->queue && vdev->queue->owner) {
+ struct mutex *lock = vdev->queue->lock ?
+ vdev->queue->lock : vdev->lock;
+
+ if (lock)
+ mutex_lock(lock);
+ vb2_queue_release(vdev->queue);
+ vdev->queue->owner = NULL;
+ if (lock)
+ mutex_unlock(lock);
+ }
+ /*
+ * Now we put the device, and in most cases this will release
+ * everything.
+ */
+ put_device(&vdev->dev);
+}
+EXPORT_SYMBOL_GPL(vb2_video_unregister_device);
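+
+/*
+ * A sketch of the intended call site (the mydev name is hypothetical):
+ * drivers with a vb2 queue call this from their removal path instead of
+ * the plain video_unregister_device(), e.g.
+ *
+ *   vb2_video_unregister_device(&mydev->vdev);
+ */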
+
+/* vb2_ops helpers. Only use if vq->lock is non-NULL. */
+
+void vb2_ops_wait_prepare(struct vb2_queue *vq)
+{
+ mutex_unlock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
+
+void vb2_ops_wait_finish(struct vb2_queue *vq)
+{
+ mutex_lock(vq->lock);
+}
+EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
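+
+/*
+ * A minimal sketch, assuming hypothetical my_* callbacks, of how these
+ * two helpers slot into a driver's vb2_ops when vq->lock is set:
+ *
+ *   static const struct vb2_ops my_queue_ops = {
+ *           .queue_setup  = my_queue_setup,
+ *           .buf_queue    = my_buf_queue,
+ *           .wait_prepare = vb2_ops_wait_prepare,
+ *           .wait_finish  = vb2_ops_wait_finish,
+ *   };
+ */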
+
+/*
+ * Note that this function is called during validation time and
+ * thus the req_queue_mutex is held to ensure no request objects
+ * can be added or deleted while validating. So there is no need
+ * to protect the objects list.
+ */
+int vb2_request_validate(struct media_request *req)
+{
+ struct media_request_object *obj;
+ int ret = 0;
+
+ if (!vb2_request_buffer_cnt(req))
+ return -ENOENT;
+
+ list_for_each_entry(obj, &req->objects, list) {
+ if (!obj->ops->prepare)
+ continue;
+
+ ret = obj->ops->prepare(obj);
+ if (ret)
+ break;
+ }
+
+ if (ret) {
+ list_for_each_entry_continue_reverse(obj, &req->objects, list)
+ if (obj->ops->unprepare)
+ obj->ops->unprepare(obj);
+ return ret;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vb2_request_validate);
+
+void vb2_request_queue(struct media_request *req)
+{
+ struct media_request_object *obj, *obj_safe;
+
+ /*
+ * Queue all objects. Note that buffer objects are at the end of the
+ * objects list, after all other object types. Once buffer objects
+ * are queued, the driver might delete them immediately (if the driver
+ * processes the buffer at once), so we have to use
+ * list_for_each_entry_safe() to handle the case where the object we
+ * queue is deleted.
+ */
+ list_for_each_entry_safe(obj, obj_safe, &req->objects, list)
+ if (obj->ops->queue)
+ obj->ops->queue(obj);
+}
+EXPORT_SYMBOL_GPL(vb2_request_queue);
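+
+/*
+ * A minimal sketch, not part of this patch, of how the two request
+ * helpers above hook into a driver's media_device_ops:
+ *
+ *   static const struct media_device_ops my_media_ops = {
+ *           .req_validate = vb2_request_validate,
+ *           .req_queue    = vb2_request_queue,
+ *   };
+ */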
+
+MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/common/videobuf2/videobuf2-vmalloc.c b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
new file mode 100644
index 000000000..948152f15
--- /dev/null
+++ b/drivers/media/common/videobuf2/videobuf2-vmalloc.c
@@ -0,0 +1,450 @@
+/*
+ * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
+ *
+ * Copyright (C) 2010 Samsung Electronics
+ *
+ * Author: Pawel Osciak <pawel@osciak.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/refcount.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+
+#include <media/videobuf2-v4l2.h>
+#include <media/videobuf2-vmalloc.h>
+#include <media/videobuf2-memops.h>
+
+struct vb2_vmalloc_buf {
+ void *vaddr;
+ struct frame_vector *vec;
+ enum dma_data_direction dma_dir;
+ unsigned long size;
+ refcount_t refcount;
+ struct vb2_vmarea_handler handler;
+ struct dma_buf *dbuf;
+};
+
+static void vb2_vmalloc_put(void *buf_priv);
+
+static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
+ unsigned long size)
+{
+ struct vb2_vmalloc_buf *buf;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->size = size;
+ buf->vaddr = vmalloc_user(buf->size);
+ if (!buf->vaddr) {
+ pr_debug("vmalloc of size %ld failed\n", buf->size);
+ kfree(buf);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->handler.refcount = &buf->refcount;
+ buf->handler.put = vb2_vmalloc_put;
+ buf->handler.arg = buf;
+
+ refcount_set(&buf->refcount, 1);
+ return buf;
+}
+
+static void vb2_vmalloc_put(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+
+ if (refcount_dec_and_test(&buf->refcount)) {
+ vfree(buf->vaddr);
+ kfree(buf);
+ }
+}
+
+static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
+ unsigned long vaddr, unsigned long size)
+{
+ struct vb2_vmalloc_buf *buf;
+ struct frame_vector *vec;
+ int n_pages, offset, i;
+ int ret = -ENOMEM;
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ offset = vaddr & ~PAGE_MASK;
+ buf->size = size;
+ vec = vb2_create_framevec(vaddr, size);
+ if (IS_ERR(vec)) {
+ ret = PTR_ERR(vec);
+ goto fail_pfnvec_create;
+ }
+ buf->vec = vec;
+ n_pages = frame_vector_count(vec);
+ if (frame_vector_to_pages(vec) < 0) {
+ unsigned long *nums = frame_vector_pfns(vec);
+
+		/*
+		 * We cannot get page pointers for these pfns. Check that the
+		 * memory is physically contiguous and use a direct mapping.
+		 */
+ for (i = 1; i < n_pages; i++)
+ if (nums[i-1] + 1 != nums[i])
+ goto fail_map;
+ buf->vaddr = (__force void *)
+ ioremap(__pfn_to_phys(nums[0]), size + offset);
+ } else {
+ buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
+ }
+
+ if (!buf->vaddr)
+ goto fail_map;
+ buf->vaddr += offset;
+ return buf;
+
+fail_map:
+ vb2_destroy_framevec(vec);
+fail_pfnvec_create:
+ kfree(buf);
+
+ return ERR_PTR(ret);
+}
+
+static void vb2_vmalloc_put_userptr(void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
+ unsigned int i;
+ struct page **pages;
+ unsigned int n_pages;
+
+ if (!buf->vec->is_pfns) {
+ n_pages = frame_vector_count(buf->vec);
+ pages = frame_vector_pages(buf->vec);
+ if (vaddr)
+ vm_unmap_ram((void *)vaddr, n_pages);
+ if (buf->dma_dir == DMA_FROM_DEVICE ||
+ buf->dma_dir == DMA_BIDIRECTIONAL)
+ for (i = 0; i < n_pages; i++)
+ set_page_dirty_lock(pages[i]);
+ } else {
+ iounmap((__force void __iomem *)buf->vaddr);
+ }
+ vb2_destroy_framevec(buf->vec);
+ kfree(buf);
+}
+
+static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+
+ if (!buf->vaddr) {
+ pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
+ return NULL;
+ }
+
+ return buf->vaddr;
+}
+
+static unsigned int vb2_vmalloc_num_users(void *buf_priv)
+{
+	struct vb2_vmalloc_buf *buf = buf_priv;
+
+	return refcount_read(&buf->refcount);
+}
+
+static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ int ret;
+
+ if (!buf) {
+ pr_err("No memory to map\n");
+ return -EINVAL;
+ }
+
+ ret = remap_vmalloc_range(vma, buf->vaddr, 0);
+ if (ret) {
+ pr_err("Remapping vmalloc memory, error: %d\n", ret);
+ return ret;
+ }
+
+ /*
+ * Make sure that vm_areas for 2 buffers won't be merged together
+ */
+ vma->vm_flags |= VM_DONTEXPAND;
+
+ /*
+ * Use common vm_area operations to track buffer refcount.
+ */
+ vma->vm_private_data = &buf->handler;
+ vma->vm_ops = &vb2_common_vm_ops;
+
+ vma->vm_ops->open(vma);
+
+ return 0;
+}
+
+#ifdef CONFIG_HAS_DMA
+/*********************************************/
+/* DMABUF ops for exporters */
+/*********************************************/
+
+struct vb2_vmalloc_attachment {
+ struct sg_table sgt;
+ enum dma_data_direction dma_dir;
+};
+
+static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *dbuf_attach)
+{
+ struct vb2_vmalloc_attachment *attach;
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+ int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
+ struct sg_table *sgt;
+ struct scatterlist *sg;
+ void *vaddr = buf->vaddr;
+ int ret;
+ int i;
+
+ attach = kzalloc(sizeof(*attach), GFP_KERNEL);
+ if (!attach)
+ return -ENOMEM;
+
+ sgt = &attach->sgt;
+ ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
+ if (ret) {
+ kfree(attach);
+ return ret;
+ }
+ for_each_sgtable_sg(sgt, sg, i) {
+ struct page *page = vmalloc_to_page(vaddr);
+
+ if (!page) {
+ sg_free_table(sgt);
+ kfree(attach);
+ return -ENOMEM;
+ }
+ sg_set_page(sg, page, PAGE_SIZE, 0);
+ vaddr += PAGE_SIZE;
+ }
+
+ attach->dma_dir = DMA_NONE;
+ dbuf_attach->priv = attach;
+ return 0;
+}
+
+static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
+ struct dma_buf_attachment *db_attach)
+{
+ struct vb2_vmalloc_attachment *attach = db_attach->priv;
+ struct sg_table *sgt;
+
+ if (!attach)
+ return;
+
+ sgt = &attach->sgt;
+
+ /* release the scatterlist cache */
+ if (attach->dma_dir != DMA_NONE)
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
+ sg_free_table(sgt);
+ kfree(attach);
+ db_attach->priv = NULL;
+}
+
+static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
+ struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
+{
+ struct vb2_vmalloc_attachment *attach = db_attach->priv;
+ /* stealing dmabuf mutex to serialize map/unmap operations */
+ struct mutex *lock = &db_attach->dmabuf->lock;
+ struct sg_table *sgt;
+
+ mutex_lock(lock);
+
+ sgt = &attach->sgt;
+ /* return previously mapped sg table */
+ if (attach->dma_dir == dma_dir) {
+ mutex_unlock(lock);
+ return sgt;
+ }
+
+ /* release any previous cache */
+ if (attach->dma_dir != DMA_NONE) {
+ dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
+ attach->dma_dir = DMA_NONE;
+ }
+
+ /* mapping to the client with new direction */
+ if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
+ pr_err("failed to map scatterlist\n");
+ mutex_unlock(lock);
+ return ERR_PTR(-EIO);
+ }
+
+ attach->dma_dir = dma_dir;
+
+ mutex_unlock(lock);
+
+ return sgt;
+}
+
+static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
+ struct sg_table *sgt, enum dma_data_direction dma_dir)
+{
+ /* nothing to be done here */
+}
+
+static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
+{
+ /* drop reference obtained in vb2_vmalloc_get_dmabuf */
+ vb2_vmalloc_put(dbuf->priv);
+}
+
+static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
+ struct iosys_map *map)
+{
+ struct vb2_vmalloc_buf *buf = dbuf->priv;
+
+ iosys_map_set_vaddr(map, buf->vaddr);
+
+ return 0;
+}
+
+static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
+ struct vm_area_struct *vma)
+{
+ return vb2_vmalloc_mmap(dbuf->priv, vma);
+}
+
+static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
+ .attach = vb2_vmalloc_dmabuf_ops_attach,
+ .detach = vb2_vmalloc_dmabuf_ops_detach,
+ .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
+ .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
+ .vmap = vb2_vmalloc_dmabuf_ops_vmap,
+ .mmap = vb2_vmalloc_dmabuf_ops_mmap,
+ .release = vb2_vmalloc_dmabuf_ops_release,
+};
+
+static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
+ void *buf_priv,
+ unsigned long flags)
+{
+ struct vb2_vmalloc_buf *buf = buf_priv;
+ struct dma_buf *dbuf;
+ DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
+
+ exp_info.ops = &vb2_vmalloc_dmabuf_ops;
+ exp_info.size = buf->size;
+ exp_info.flags = flags;
+ exp_info.priv = buf;
+
+ if (WARN_ON(!buf->vaddr))
+ return NULL;
+
+ dbuf = dma_buf_export(&exp_info);
+ if (IS_ERR(dbuf))
+ return NULL;
+
+ /* dmabuf keeps reference to vb2 buffer */
+ refcount_inc(&buf->refcount);
+
+ return dbuf;
+}
+#endif /* CONFIG_HAS_DMA */
+
+/*********************************************/
+/* callbacks for DMABUF buffers */
+/*********************************************/
+
+static int vb2_vmalloc_map_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+ struct iosys_map map;
+ int ret;
+
+ ret = dma_buf_vmap(buf->dbuf, &map);
+ if (ret)
+ return -EFAULT;
+ buf->vaddr = map.vaddr;
+
+ return 0;
+}
+
+static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
+
+ dma_buf_vunmap(buf->dbuf, &map);
+ buf->vaddr = NULL;
+}
+
+static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
+{
+ struct vb2_vmalloc_buf *buf = mem_priv;
+ struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);
+
+ if (buf->vaddr)
+ dma_buf_vunmap(buf->dbuf, &map);
+
+ kfree(buf);
+}
+
+static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
+ struct device *dev,
+ struct dma_buf *dbuf,
+ unsigned long size)
+{
+ struct vb2_vmalloc_buf *buf;
+
+ if (dbuf->size < size)
+ return ERR_PTR(-EFAULT);
+
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (!buf)
+ return ERR_PTR(-ENOMEM);
+
+ buf->dbuf = dbuf;
+ buf->dma_dir = vb->vb2_queue->dma_dir;
+ buf->size = size;
+
+ return buf;
+}
+
+const struct vb2_mem_ops vb2_vmalloc_memops = {
+ .alloc = vb2_vmalloc_alloc,
+ .put = vb2_vmalloc_put,
+ .get_userptr = vb2_vmalloc_get_userptr,
+ .put_userptr = vb2_vmalloc_put_userptr,
+#ifdef CONFIG_HAS_DMA
+ .get_dmabuf = vb2_vmalloc_get_dmabuf,
+#endif
+ .map_dmabuf = vb2_vmalloc_map_dmabuf,
+ .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
+ .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
+ .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
+ .vaddr = vb2_vmalloc_vaddr,
+ .mmap = vb2_vmalloc_mmap,
+ .num_users = vb2_vmalloc_num_users,
+};
+EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
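+
+/*
+ * A minimal usage sketch, not part of this file: a driver selects this
+ * allocator while filling in its vb2_queue, before vb2_queue_init():
+ *
+ *   q->mem_ops  = &vb2_vmalloc_memops;
+ *   q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
+ */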
+
+MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
+MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
+MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS(DMA_BUF);