author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:18:06 +0000
commit     638a9e433ecd61e64761352dbec1fa4f5874c941
tree       fdbff74a238d7a5a7d1cef071b7230bc064b9f25 /drivers/iio/buffer
parent     Releasing progress-linux version 6.9.12-1~progress7.99u1.
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/iio/buffer')
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dma.c        | 100
-rw-r--r--  drivers/iio/buffer/industrialio-buffer-dmaengine.c  |  86
2 files changed, 127 insertions, 59 deletions
diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c
index 5610ba6792..13b1a85896 100644
--- a/drivers/iio/buffer/industrialio-buffer-dma.c
+++ b/drivers/iio/buffer/industrialio-buffer-dma.c
@@ -195,6 +195,18 @@ static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
block->state = IIO_BLOCK_STATE_DONE;
}
+static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
+{
+ __poll_t flags;
+
+ if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
+ flags = EPOLLIN | EPOLLRDNORM;
+ else
+ flags = EPOLLOUT | EPOLLWRNORM;
+
+ wake_up_interruptible_poll(&queue->buffer.pollq, flags);
+}
+
/**
* iio_dma_buffer_block_done() - Indicate that a block has been completed
* @block: The completed block
@@ -212,7 +224,7 @@ void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
spin_unlock_irqrestore(&queue->list_lock, flags);
iio_buffer_block_put_atomic(block);
- wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+ iio_dma_buffer_queue_wake(queue);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_done);
@@ -241,7 +253,7 @@ void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
}
spin_unlock_irqrestore(&queue->list_lock, flags);
- wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM);
+ iio_dma_buffer_queue_wake(queue);
}
EXPORT_SYMBOL_GPL(iio_dma_buffer_block_list_abort);
@@ -335,8 +347,24 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer)
queue->fileio.blocks[i] = block;
}
- block->state = IIO_BLOCK_STATE_QUEUED;
- list_add_tail(&block->head, &queue->incoming);
+ /*
+ * block->bytes_used may have been modified previously, e.g. by
+ * iio_dma_buffer_block_list_abort(). Reset it here to the
+ * block's size so that iio_dma_buffer_io() will work.
+ */
+ block->bytes_used = block->size;
+
+ /*
+ * If it's an input buffer, mark the block as queued, and
+ * iio_dma_buffer_enable() will submit it. Otherwise mark it as
+ * done, which means it's ready to be dequeued.
+ */
+ if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
+ block->state = IIO_BLOCK_STATE_QUEUED;
+ list_add_tail(&block->head, &queue->incoming);
+ } else {
+ block->state = IIO_BLOCK_STATE_DONE;
+ }
}
out_unlock:
@@ -488,20 +516,12 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue(
return block;
}
-/**
- * iio_dma_buffer_read() - DMA buffer read callback
- * @buffer: Buffer to read form
- * @n: Number of bytes to read
- * @user_buffer: Userspace buffer to copy the data to
- *
- * Should be used as the read callback for iio_buffer_access_ops
- * struct for DMA buffers.
- */
-int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
- char __user *user_buffer)
+static int iio_dma_buffer_io(struct iio_buffer *buffer, size_t n,
+ char __user *user_buffer, bool is_from_user)
{
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
struct iio_dma_buffer_block *block;
+ void *addr;
int ret;
if (n < buffer->bytes_per_datum)
@@ -524,8 +544,13 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
n = rounddown(n, buffer->bytes_per_datum);
if (n > block->bytes_used - queue->fileio.pos)
n = block->bytes_used - queue->fileio.pos;
+ addr = block->vaddr + queue->fileio.pos;
- if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) {
+ if (is_from_user)
+ ret = copy_from_user(addr, user_buffer, n);
+ else
+ ret = copy_to_user(user_buffer, addr, n);
+ if (ret) {
ret = -EFAULT;
goto out_unlock;
}
@@ -544,16 +569,49 @@ out_unlock:
return ret;
}
+
+/**
+ * iio_dma_buffer_read() - DMA buffer read callback
+ * @buffer: Buffer to read from
+ * @n: Number of bytes to read
+ * @user_buffer: Userspace buffer to copy the data to
+ *
+ * Should be used as the read callback for iio_buffer_access_ops
+ * struct for DMA buffers.
+ */
+int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
+ char __user *user_buffer)
+{
+ return iio_dma_buffer_io(buffer, n, user_buffer, false);
+}
EXPORT_SYMBOL_GPL(iio_dma_buffer_read);
/**
- * iio_dma_buffer_data_available() - DMA buffer data_available callback
- * @buf: Buffer to check for data availability
+ * iio_dma_buffer_write() - DMA buffer write callback
+ * @buffer: Buffer to write to
+ * @n: Number of bytes to write
+ * @user_buffer: Userspace buffer to copy the data from
*
- * Should be used as the data_available callback for iio_buffer_access_ops
+ * Should be used as the write callback for iio_buffer_access_ops
* struct for DMA buffers.
*/
-size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
+int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
+ const char __user *user_buffer)
+{
+ return iio_dma_buffer_io(buffer, n,
+ (__force __user char *)user_buffer, true);
+}
+EXPORT_SYMBOL_GPL(iio_dma_buffer_write);
+
+/**
+ * iio_dma_buffer_usage() - DMA buffer data_available and
+ * space_available callback
+ * @buf: Buffer to check for data availability
+ *
+ * Should be used as the data_available and space_available callbacks for
+ * iio_buffer_access_ops struct for DMA buffers.
+ */
+size_t iio_dma_buffer_usage(struct iio_buffer *buf)
{
struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
struct iio_dma_buffer_block *block;
@@ -586,7 +644,7 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf)
return data_available;
}
-EXPORT_SYMBOL_GPL(iio_dma_buffer_data_available);
+EXPORT_SYMBOL_GPL(iio_dma_buffer_usage);
/**
* iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
diff --git a/drivers/iio/buffer/industrialio-buffer-dmaengine.c b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
index a18c1da292..918f6f8d65 100644
--- a/drivers/iio/buffer/industrialio-buffer-dmaengine.c
+++ b/drivers/iio/buffer/industrialio-buffer-dmaengine.c
@@ -64,14 +64,25 @@ static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
struct dmaengine_buffer *dmaengine_buffer =
iio_buffer_to_dmaengine_buffer(&queue->buffer);
struct dma_async_tx_descriptor *desc;
+ enum dma_transfer_direction dma_dir;
+ size_t max_size;
dma_cookie_t cookie;
- block->bytes_used = min(block->size, dmaengine_buffer->max_size);
- block->bytes_used = round_down(block->bytes_used,
- dmaengine_buffer->align);
+ max_size = min(block->size, dmaengine_buffer->max_size);
+ max_size = round_down(max_size, dmaengine_buffer->align);
+
+ if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
+ block->bytes_used = max_size;
+ dma_dir = DMA_DEV_TO_MEM;
+ } else {
+ dma_dir = DMA_MEM_TO_DEV;
+ }
+
+ if (!block->bytes_used || block->bytes_used > max_size)
+ return -EINVAL;
desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
- block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
+ block->phys_addr, block->bytes_used, dma_dir,
DMA_PREP_INTERRUPT);
if (!desc)
return -ENOMEM;
@@ -112,12 +123,14 @@ static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
.read = iio_dma_buffer_read,
+ .write = iio_dma_buffer_write,
.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
.set_length = iio_dma_buffer_set_length,
.request_update = iio_dma_buffer_request_update,
.enable = iio_dma_buffer_enable,
.disable = iio_dma_buffer_disable,
- .data_available = iio_dma_buffer_data_available,
+ .data_available = iio_dma_buffer_usage,
+ .space_available = iio_dma_buffer_usage,
.release = iio_dmaengine_buffer_release,
.modes = INDIO_BUFFER_HARDWARE,
@@ -159,7 +172,7 @@ static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
* Once done using the buffer iio_dmaengine_buffer_free() should be used to
* release it.
*/
-struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
+static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
const char *channel)
{
struct dmaengine_buffer *dmaengine_buffer;
@@ -210,7 +223,6 @@ err_free:
kfree(dmaengine_buffer);
return ERR_PTR(ret);
}
-EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
/**
* iio_dmaengine_buffer_free() - Free dmaengine buffer
@@ -230,66 +242,64 @@ void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);
-static void __devm_iio_dmaengine_buffer_free(void *buffer)
-{
- iio_dmaengine_buffer_free(buffer);
-}
-
-/**
- * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
- * @dev: Parent device for the buffer
- * @channel: DMA channel name, typically "rx".
- *
- * This allocates a new IIO buffer which internally uses the DMAengine framework
- * to perform its transfers. The parent device will be used to request the DMA
- * channel.
- *
- * The buffer will be automatically de-allocated once the device gets destroyed.
- */
-static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
- const char *channel)
+struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel,
+ enum iio_buffer_direction dir)
{
struct iio_buffer *buffer;
int ret;
buffer = iio_dmaengine_buffer_alloc(dev, channel);
if (IS_ERR(buffer))
- return buffer;
+ return ERR_CAST(buffer);
- ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
- buffer);
- if (ret)
+ indio_dev->modes |= INDIO_BUFFER_HARDWARE;
+
+ buffer->direction = dir;
+
+ ret = iio_device_attach_buffer(indio_dev, buffer);
+ if (ret) {
+ iio_dmaengine_buffer_free(buffer);
return ERR_PTR(ret);
+ }
return buffer;
}
+EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
+
+static void __devm_iio_dmaengine_buffer_free(void *buffer)
+{
+ iio_dmaengine_buffer_free(buffer);
+}
/**
- * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
+ * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
* @dev: Parent device for the buffer
* @indio_dev: IIO device to which to attach this buffer.
* @channel: DMA channel name, typically "rx".
+ * @dir: Direction of buffer (in or out)
*
* This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
* and attaches it to an IIO device with iio_device_attach_buffer().
* It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
* IIO device.
*/
-int devm_iio_dmaengine_buffer_setup(struct device *dev,
- struct iio_dev *indio_dev,
- const char *channel)
+int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
+ struct iio_dev *indio_dev,
+ const char *channel,
+ enum iio_buffer_direction dir)
{
struct iio_buffer *buffer;
- buffer = devm_iio_dmaengine_buffer_alloc(dev, channel);
+ buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
if (IS_ERR(buffer))
return PTR_ERR(buffer);
- indio_dev->modes |= INDIO_BUFFER_HARDWARE;
-
- return iio_device_attach_buffer(indio_dev, buffer);
+ return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
+ buffer);
}
-EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);
+EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);
MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
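
For context, what these hunks add up to: iio_dma_buffer_write() plus the output-direction handling in the dmaengine glue let a driver attach a DMA buffer in the memory-to-device direction and have userspace feed it through write(). Below is a minimal probe-time sketch using the new devm_iio_dmaengine_buffer_setup_ext(); the function name example_dac_probe_buffer, the "tx" channel label, and the surrounding driver are illustrative assumptions, not part of this commit.

/* Hypothetical driver excerpt: attach an output (memory-to-device) DMA
 * buffer using the interface added above. Only
 * devm_iio_dmaengine_buffer_setup_ext() and IIO_BUFFER_DIRECTION_OUT come
 * from this commit; the rest is an illustrative assumption.
 */
#include <linux/device.h>
#include <linux/iio/iio.h>
#include <linux/iio/buffer-dmaengine.h>

static int example_dac_probe_buffer(struct device *dev,
				    struct iio_dev *indio_dev)
{
	int ret;

	/* Request the "tx" DMA channel and attach an output-direction buffer;
	 * the devm action frees it automatically when @dev is unbound. */
	ret = devm_iio_dmaengine_buffer_setup_ext(dev, indio_dev, "tx",
						  IIO_BUFFER_DIRECTION_OUT);
	if (ret)
		return dev_err_probe(dev, ret, "failed to set up DMA buffer\n");

	return 0;
}

With the buffer attached this way, data written to the character device flows through iio_dma_buffer_write() and is handed to the dmaengine channel as a DMA_MEM_TO_DEV transfer by iio_dmaengine_buffer_submit_block().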