author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/dma/hsu
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dma/hsu')

 -rw-r--r--  drivers/dma/hsu/Kconfig   |  10
 -rw-r--r--  drivers/dma/hsu/Makefile  |   6
 -rw-r--r--  drivers/dma/hsu/hsu.c     | 512
 -rw-r--r--  drivers/dma/hsu/hsu.h     | 127
 -rw-r--r--  drivers/dma/hsu/pci.c     | 135

 5 files changed, 790 insertions, 0 deletions
diff --git a/drivers/dma/hsu/Kconfig b/drivers/dma/hsu/Kconfig new file mode 100644 index 000000000..af102baec --- /dev/null +++ b/drivers/dma/hsu/Kconfig @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0-only +# DMA engine configuration for hsu +config HSU_DMA + tristate + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + +config HSU_DMA_PCI + tristate + depends on HSU_DMA && PCI diff --git a/drivers/dma/hsu/Makefile b/drivers/dma/hsu/Makefile new file mode 100644 index 000000000..61829b1de --- /dev/null +++ b/drivers/dma/hsu/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_HSU_DMA) += hsu_dma.o +hsu_dma-objs := hsu.o + +obj-$(CONFIG_HSU_DMA_PCI) += hsu_dma_pci.o +hsu_dma_pci-objs := pci.o diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c new file mode 100644 index 000000000..af5a2e252 --- /dev/null +++ b/drivers/dma/hsu/hsu.c @@ -0,0 +1,512 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Core driver for the High Speed UART DMA + * + * Copyright (C) 2015 Intel Corporation + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * + * Partially based on the bits found in drivers/tty/serial/mfd.c. + */ + +/* + * DMA channel allocation: + * 1. Even number chans are used for DMA Read (UART TX), odd chans for DMA + * Write (UART RX). + * 2. 0/1 channel are assigned to port 0, 2/3 chan to port 1, 4/5 chan to + * port 3, and so on. + */ + +#include <linux/bits.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/percpu-defs.h> +#include <linux/scatterlist.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/spinlock.h> + +#include "hsu.h" + +#define HSU_DMA_BUSWIDTHS \ + BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ + BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_16_BYTES) + +static inline void hsu_chan_disable(struct hsu_dma_chan *hsuc) +{ + hsu_chan_writel(hsuc, HSU_CH_CR, 0); +} + +static inline void hsu_chan_enable(struct hsu_dma_chan *hsuc) +{ + u32 cr = HSU_CH_CR_CHA; + + if (hsuc->direction == DMA_MEM_TO_DEV) + cr &= ~HSU_CH_CR_CHD; + else if (hsuc->direction == DMA_DEV_TO_MEM) + cr |= HSU_CH_CR_CHD; + + hsu_chan_writel(hsuc, HSU_CH_CR, cr); +} + +static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc) +{ + struct dma_slave_config *config = &hsuc->config; + struct hsu_dma_desc *desc = hsuc->desc; + u32 bsr = 0, mtsr = 0; /* to shut the compiler up */ + u32 dcr = HSU_CH_DCR_CHSOE | HSU_CH_DCR_CHEI; + unsigned int i, count; + + if (hsuc->direction == DMA_MEM_TO_DEV) { + bsr = config->dst_maxburst; + mtsr = config->dst_addr_width; + } else if (hsuc->direction == DMA_DEV_TO_MEM) { + bsr = config->src_maxburst; + mtsr = config->src_addr_width; + } + + hsu_chan_disable(hsuc); + + hsu_chan_writel(hsuc, HSU_CH_DCR, 0); + hsu_chan_writel(hsuc, HSU_CH_BSR, bsr); + hsu_chan_writel(hsuc, HSU_CH_MTSR, mtsr); + + /* Set descriptors */ + count = desc->nents - desc->active; + for (i = 0; i < count && i < HSU_DMA_CHAN_NR_DESC; i++) { + hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); + hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); + + /* Prepare value for DCR */ + dcr |= HSU_CH_DCR_DESCA(i); + dcr |= HSU_CH_DCR_CHTOI(i); /* timeout bit, see HSU Errata 1 */ + + 
desc->active++; + } + /* Only for the last descriptor in the chain */ + dcr |= HSU_CH_DCR_CHSOD(count - 1); + dcr |= HSU_CH_DCR_CHDI(count - 1); + + hsu_chan_writel(hsuc, HSU_CH_DCR, dcr); + + hsu_chan_enable(hsuc); +} + +static void hsu_dma_stop_channel(struct hsu_dma_chan *hsuc) +{ + hsu_chan_disable(hsuc); + hsu_chan_writel(hsuc, HSU_CH_DCR, 0); +} + +static void hsu_dma_start_channel(struct hsu_dma_chan *hsuc) +{ + hsu_dma_chan_start(hsuc); +} + +static void hsu_dma_start_transfer(struct hsu_dma_chan *hsuc) +{ + struct virt_dma_desc *vdesc; + + /* Get the next descriptor */ + vdesc = vchan_next_desc(&hsuc->vchan); + if (!vdesc) { + hsuc->desc = NULL; + return; + } + + list_del(&vdesc->node); + hsuc->desc = to_hsu_dma_desc(vdesc); + + /* Start the channel with a new descriptor */ + hsu_dma_start_channel(hsuc); +} + +/* + * hsu_dma_get_status() - get DMA channel status + * @chip: HSUART DMA chip + * @nr: DMA channel number + * @status: pointer for DMA Channel Status Register value + * + * Description: + * The function reads and clears the DMA Channel Status Register, checks + * if it was a timeout interrupt and returns a corresponding value. + * + * Caller should provide a valid pointer for the DMA Channel Status + * Register value that will be returned in @status. + * + * Return: + * 1 for DMA timeout status, 0 for other DMA status, or error code for + * invalid parameters or no interrupt pending. + */ +int hsu_dma_get_status(struct hsu_dma_chip *chip, unsigned short nr, + u32 *status) +{ + struct hsu_dma_chan *hsuc; + unsigned long flags; + u32 sr; + + /* Sanity check */ + if (nr >= chip->hsu->nr_channels) + return -EINVAL; + + hsuc = &chip->hsu->chan[nr]; + + /* + * No matter what situation, need read clear the IRQ status + * There is a bug, see Errata 5, HSD 2900918 + */ + spin_lock_irqsave(&hsuc->vchan.lock, flags); + sr = hsu_chan_readl(hsuc, HSU_CH_SR); + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); + + /* Check if any interrupt is pending */ + sr &= ~(HSU_CH_SR_DESCE_ANY | HSU_CH_SR_CDESC_ANY); + if (!sr) + return -EIO; + + /* Timeout IRQ, need wait some time, see Errata 2 */ + if (sr & HSU_CH_SR_DESCTO_ANY) + udelay(2); + + /* + * At this point, at least one of Descriptor Time Out, Channel Error + * or Descriptor Done bits must be set. Clear the Descriptor Time Out + * bits and if sr is still non-zero, it must be channel error or + * descriptor done which are higher priority than timeout and handled + * in hsu_dma_do_irq(). Else, it must be a timeout. + */ + sr &= ~HSU_CH_SR_DESCTO_ANY; + + *status = sr; + + return sr ? 0 : 1; +} +EXPORT_SYMBOL_GPL(hsu_dma_get_status); + +/* + * hsu_dma_do_irq() - DMA interrupt handler + * @chip: HSUART DMA chip + * @nr: DMA channel number + * @status: Channel Status Register value + * + * Description: + * This function handles Channel Error and Descriptor Done interrupts. + * This function should be called after determining that the DMA interrupt + * is not a normal timeout interrupt, ie. hsu_dma_get_status() returned 0. + * + * Return: + * 0 for invalid channel number, 1 otherwise. 
+ */ +int hsu_dma_do_irq(struct hsu_dma_chip *chip, unsigned short nr, u32 status) +{ + struct dma_chan_percpu *stat; + struct hsu_dma_chan *hsuc; + struct hsu_dma_desc *desc; + unsigned long flags; + + /* Sanity check */ + if (nr >= chip->hsu->nr_channels) + return 0; + + hsuc = &chip->hsu->chan[nr]; + stat = this_cpu_ptr(hsuc->vchan.chan.local); + + spin_lock_irqsave(&hsuc->vchan.lock, flags); + desc = hsuc->desc; + if (desc) { + if (status & HSU_CH_SR_CHE) { + desc->status = DMA_ERROR; + } else if (desc->active < desc->nents) { + hsu_dma_start_channel(hsuc); + } else { + vchan_cookie_complete(&desc->vdesc); + desc->status = DMA_COMPLETE; + stat->bytes_transferred += desc->length; + hsu_dma_start_transfer(hsuc); + } + } + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); + + return 1; +} +EXPORT_SYMBOL_GPL(hsu_dma_do_irq); + +static struct hsu_dma_desc *hsu_dma_alloc_desc(unsigned int nents) +{ + struct hsu_dma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT); + if (!desc->sg) { + kfree(desc); + return NULL; + } + + return desc; +} + +static void hsu_dma_desc_free(struct virt_dma_desc *vdesc) +{ + struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc); + + kfree(desc->sg); + kfree(desc); +} + +static struct dma_async_tx_descriptor *hsu_dma_prep_slave_sg( + struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sg_len, enum dma_transfer_direction direction, + unsigned long flags, void *context) +{ + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); + struct hsu_dma_desc *desc; + struct scatterlist *sg; + unsigned int i; + + desc = hsu_dma_alloc_desc(sg_len); + if (!desc) + return NULL; + + for_each_sg(sgl, sg, sg_len, i) { + desc->sg[i].addr = sg_dma_address(sg); + desc->sg[i].len = sg_dma_len(sg); + + desc->length += sg_dma_len(sg); + } + + desc->nents = sg_len; + desc->direction = direction; + /* desc->active = 0 by kzalloc */ + desc->status = DMA_IN_PROGRESS; + + return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags); +} + +static void hsu_dma_issue_pending(struct dma_chan *chan) +{ + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&hsuc->vchan.lock, flags); + if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc) + hsu_dma_start_transfer(hsuc); + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); +} + +static size_t hsu_dma_active_desc_size(struct hsu_dma_chan *hsuc) +{ + struct hsu_dma_desc *desc = hsuc->desc; + size_t bytes = 0; + int i; + + for (i = desc->active; i < desc->nents; i++) + bytes += desc->sg[i].len; + + i = HSU_DMA_CHAN_NR_DESC - 1; + do { + bytes += hsu_chan_readl(hsuc, HSU_CH_DxTSR(i)); + } while (--i >= 0); + + return bytes; +} + +static enum dma_status hsu_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *state) +{ + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); + struct virt_dma_desc *vdesc; + enum dma_status status; + size_t bytes; + unsigned long flags; + + status = dma_cookie_status(chan, cookie, state); + if (status == DMA_COMPLETE) + return status; + + spin_lock_irqsave(&hsuc->vchan.lock, flags); + vdesc = vchan_find_desc(&hsuc->vchan, cookie); + if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) { + bytes = hsu_dma_active_desc_size(hsuc); + dma_set_residue(state, bytes); + status = hsuc->desc->status; + } else if (vdesc) { + bytes = to_hsu_dma_desc(vdesc)->length; + dma_set_residue(state, bytes); + } + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); + + return 
status; +} + +static int hsu_dma_slave_config(struct dma_chan *chan, + struct dma_slave_config *config) +{ + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); + + memcpy(&hsuc->config, config, sizeof(hsuc->config)); + + return 0; +} + +static int hsu_dma_pause(struct dma_chan *chan) +{ + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&hsuc->vchan.lock, flags); + if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) { + hsu_chan_disable(hsuc); + hsuc->desc->status = DMA_PAUSED; + } + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); + + return 0; +} + +static int hsu_dma_resume(struct dma_chan *chan) +{ + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&hsuc->vchan.lock, flags); + if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) { + hsuc->desc->status = DMA_IN_PROGRESS; + hsu_chan_enable(hsuc); + } + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); + + return 0; +} + +static int hsu_dma_terminate_all(struct dma_chan *chan) +{ + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&hsuc->vchan.lock, flags); + + hsu_dma_stop_channel(hsuc); + if (hsuc->desc) { + hsu_dma_desc_free(&hsuc->desc->vdesc); + hsuc->desc = NULL; + } + + vchan_get_all_descriptors(&hsuc->vchan, &head); + spin_unlock_irqrestore(&hsuc->vchan.lock, flags); + vchan_dma_desc_free_list(&hsuc->vchan, &head); + + return 0; +} + +static void hsu_dma_free_chan_resources(struct dma_chan *chan) +{ + vchan_free_chan_resources(to_virt_chan(chan)); +} + +static void hsu_dma_synchronize(struct dma_chan *chan) +{ + struct hsu_dma_chan *hsuc = to_hsu_dma_chan(chan); + + vchan_synchronize(&hsuc->vchan); +} + +int hsu_dma_probe(struct hsu_dma_chip *chip) +{ + struct hsu_dma *hsu; + void __iomem *addr = chip->regs + chip->offset; + unsigned short i; + int ret; + + hsu = devm_kzalloc(chip->dev, sizeof(*hsu), GFP_KERNEL); + if (!hsu) + return -ENOMEM; + + chip->hsu = hsu; + + /* Calculate nr_channels from the IO space length */ + hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH; + + hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels, + sizeof(*hsu->chan), GFP_KERNEL); + if (!hsu->chan) + return -ENOMEM; + + INIT_LIST_HEAD(&hsu->dma.channels); + for (i = 0; i < hsu->nr_channels; i++) { + struct hsu_dma_chan *hsuc = &hsu->chan[i]; + + hsuc->vchan.desc_free = hsu_dma_desc_free; + vchan_init(&hsuc->vchan, &hsu->dma); + + hsuc->direction = (i & 0x1) ? 
DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; + hsuc->reg = addr + i * HSU_DMA_CHAN_LENGTH; + } + + dma_cap_set(DMA_SLAVE, hsu->dma.cap_mask); + dma_cap_set(DMA_PRIVATE, hsu->dma.cap_mask); + + hsu->dma.device_free_chan_resources = hsu_dma_free_chan_resources; + + hsu->dma.device_prep_slave_sg = hsu_dma_prep_slave_sg; + + hsu->dma.device_issue_pending = hsu_dma_issue_pending; + hsu->dma.device_tx_status = hsu_dma_tx_status; + + hsu->dma.device_config = hsu_dma_slave_config; + hsu->dma.device_pause = hsu_dma_pause; + hsu->dma.device_resume = hsu_dma_resume; + hsu->dma.device_terminate_all = hsu_dma_terminate_all; + hsu->dma.device_synchronize = hsu_dma_synchronize; + + hsu->dma.src_addr_widths = HSU_DMA_BUSWIDTHS; + hsu->dma.dst_addr_widths = HSU_DMA_BUSWIDTHS; + hsu->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + hsu->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + hsu->dma.dev = chip->dev; + + dma_set_max_seg_size(hsu->dma.dev, HSU_CH_DxTSR_MASK); + + ret = dma_async_device_register(&hsu->dma); + if (ret) + return ret; + + dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels); + return 0; +} +EXPORT_SYMBOL_GPL(hsu_dma_probe); + +int hsu_dma_remove(struct hsu_dma_chip *chip) +{ + struct hsu_dma *hsu = chip->hsu; + unsigned short i; + + dma_async_device_unregister(&hsu->dma); + + for (i = 0; i < hsu->nr_channels; i++) { + struct hsu_dma_chan *hsuc = &hsu->chan[i]; + + tasklet_kill(&hsuc->vchan.task); + } + + return 0; +} +EXPORT_SYMBOL_GPL(hsu_dma_remove); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("High Speed UART DMA core driver"); +MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h new file mode 100644 index 000000000..3bca577b9 --- /dev/null +++ b/drivers/dma/hsu/hsu.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Driver for the High Speed UART DMA + * + * Copyright (C) 2015 Intel Corporation + * + * Partially based on the bits found in drivers/tty/serial/mfd.c. 
+ */ + +#ifndef __DMA_HSU_H__ +#define __DMA_HSU_H__ + +#include <linux/bits.h> +#include <linux/container_of.h> +#include <linux/io.h> +#include <linux/types.h> + +#include <linux/dma/hsu.h> + +#include "../virt-dma.h" + +#define HSU_CH_SR 0x00 /* channel status */ +#define HSU_CH_CR 0x04 /* channel control */ +#define HSU_CH_DCR 0x08 /* descriptor control */ +#define HSU_CH_BSR 0x10 /* FIFO buffer size */ +#define HSU_CH_MTSR 0x14 /* minimum transfer size */ +#define HSU_CH_DxSAR(x) (0x20 + 8 * (x)) /* desc start addr */ +#define HSU_CH_DxTSR(x) (0x24 + 8 * (x)) /* desc transfer size */ +#define HSU_CH_D0SAR 0x20 /* desc 0 start addr */ +#define HSU_CH_D0TSR 0x24 /* desc 0 transfer size */ +#define HSU_CH_D1SAR 0x28 +#define HSU_CH_D1TSR 0x2c +#define HSU_CH_D2SAR 0x30 +#define HSU_CH_D2TSR 0x34 +#define HSU_CH_D3SAR 0x38 +#define HSU_CH_D3TSR 0x3c + +#define HSU_DMA_CHAN_NR_DESC 4 +#define HSU_DMA_CHAN_LENGTH 0x40 + +/* Bits in HSU_CH_SR */ +#define HSU_CH_SR_DESCTO(x) BIT(8 + (x)) +#define HSU_CH_SR_DESCTO_ANY GENMASK(11, 8) +#define HSU_CH_SR_CHE BIT(15) +#define HSU_CH_SR_DESCE(x) BIT(16 + (x)) +#define HSU_CH_SR_DESCE_ANY GENMASK(19, 16) +#define HSU_CH_SR_CDESC_ANY GENMASK(31, 30) + +/* Bits in HSU_CH_CR */ +#define HSU_CH_CR_CHA BIT(0) +#define HSU_CH_CR_CHD BIT(1) + +/* Bits in HSU_CH_DCR */ +#define HSU_CH_DCR_DESCA(x) BIT(0 + (x)) +#define HSU_CH_DCR_CHSOD(x) BIT(8 + (x)) +#define HSU_CH_DCR_CHSOTO BIT(14) +#define HSU_CH_DCR_CHSOE BIT(15) +#define HSU_CH_DCR_CHDI(x) BIT(16 + (x)) +#define HSU_CH_DCR_CHEI BIT(23) +#define HSU_CH_DCR_CHTOI(x) BIT(24 + (x)) + +/* Bits in HSU_CH_DxTSR */ +#define HSU_CH_DxTSR_MASK GENMASK(15, 0) +#define HSU_CH_DxTSR_TSR(x) ((x) & HSU_CH_DxTSR_MASK) + +struct hsu_dma_sg { + dma_addr_t addr; + unsigned int len; +}; + +struct hsu_dma_desc { + struct virt_dma_desc vdesc; + enum dma_transfer_direction direction; + struct hsu_dma_sg *sg; + unsigned int nents; + size_t length; + unsigned int active; + enum dma_status status; +}; + +static inline struct hsu_dma_desc *to_hsu_dma_desc(struct virt_dma_desc *vdesc) +{ + return container_of(vdesc, struct hsu_dma_desc, vdesc); +} + +struct hsu_dma_chan { + struct virt_dma_chan vchan; + + void __iomem *reg; + + /* hardware configuration */ + enum dma_transfer_direction direction; + struct dma_slave_config config; + + struct hsu_dma_desc *desc; +}; + +static inline struct hsu_dma_chan *to_hsu_dma_chan(struct dma_chan *chan) +{ + return container_of(chan, struct hsu_dma_chan, vchan.chan); +} + +static inline u32 hsu_chan_readl(struct hsu_dma_chan *hsuc, int offset) +{ + return readl(hsuc->reg + offset); +} + +static inline void hsu_chan_writel(struct hsu_dma_chan *hsuc, int offset, + u32 value) +{ + writel(value, hsuc->reg + offset); +} + +struct hsu_dma { + struct dma_device dma; + + /* channels */ + struct hsu_dma_chan *chan; + unsigned short nr_channels; +}; + +static inline struct hsu_dma *to_hsu_dma(struct dma_device *ddev) +{ + return container_of(ddev, struct hsu_dma, dma); +} + +#endif /* __DMA_HSU_H__ */ diff --git a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c new file mode 100644 index 000000000..0fcc0c0c2 --- /dev/null +++ b/drivers/dma/hsu/pci.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PCI driver for the High Speed UART DMA + * + * Copyright (C) 2015 Intel Corporation + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * + * Partially based on the bits found in drivers/tty/serial/mfd.c. 
+ */ + +#include <linux/bitops.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/pci.h> + +#include "hsu.h" + +#define HSU_PCI_DMASR 0x00 +#define HSU_PCI_DMAISR 0x04 + +#define HSU_PCI_CHAN_OFFSET 0x100 + +#define PCI_DEVICE_ID_INTEL_MFLD_HSU_DMA 0x081e +#define PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA 0x1192 + +static irqreturn_t hsu_pci_irq(int irq, void *dev) +{ + struct hsu_dma_chip *chip = dev; + unsigned long dmaisr; + unsigned short i; + u32 status; + int ret = 0; + int err; + + dmaisr = readl(chip->regs + HSU_PCI_DMAISR); + for_each_set_bit(i, &dmaisr, chip->hsu->nr_channels) { + err = hsu_dma_get_status(chip, i, &status); + if (err > 0) + ret |= 1; + else if (err == 0) + ret |= hsu_dma_do_irq(chip, i, status); + } + + return IRQ_RETVAL(ret); +} + +static void hsu_pci_dma_remove(void *chip) +{ + hsu_dma_remove(chip); +} + +static int hsu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct hsu_dma_chip *chip; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + return ret; + } + + pci_set_master(pdev); + pci_try_set_mwi(pdev); + + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) + return ret; + + chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (ret < 0) + return ret; + + chip->dev = &pdev->dev; + chip->regs = pcim_iomap_table(pdev)[0]; + chip->length = pci_resource_len(pdev, 0); + chip->offset = HSU_PCI_CHAN_OFFSET; + chip->irq = pci_irq_vector(pdev, 0); + + ret = hsu_dma_probe(chip); + if (ret) + return ret; + + ret = devm_add_action_or_reset(dev, hsu_pci_dma_remove, chip); + if (ret) + return ret; + + ret = devm_request_irq(dev, chip->irq, hsu_pci_irq, 0, "hsu_dma_pci", chip); + if (ret) + return ret; + + /* + * On Intel Tangier B0 and Anniedale the interrupt line, disregarding + * to have different numbers, is shared between HSU DMA and UART IPs. + * Thus on such SoCs we are expecting that IRQ handler is called in + * UART driver only. Instead of handling the spurious interrupt + * from HSU DMA here and waste CPU time and delay HSU UART interrupt + * handling, disable the interrupt entirely. + */ + if (pdev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA) + disable_irq_nosync(chip->irq); + + pci_set_drvdata(pdev, chip); + + return 0; +} + +static const struct pci_device_id hsu_pci_id_table[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MFLD_HSU_DMA), 0 }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD_HSU_DMA), 0 }, + { } +}; +MODULE_DEVICE_TABLE(pci, hsu_pci_id_table); + +static struct pci_driver hsu_pci_driver = { + .name = "hsu_dma_pci", + .id_table = hsu_pci_id_table, + .probe = hsu_pci_probe, +}; + +module_pci_driver(hsu_pci_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("High Speed UART DMA PCI driver"); +MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>"); |
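
The two exported helpers added by this commit, hsu_dma_get_status() and hsu_dma_do_irq(), are meant to be driven from the serial side as well; hsu_pci_irq() in pci.c above is one such caller. Below is a minimal sketch of how a consumer might dispatch a channel interrupt, following the return-value convention documented in hsu.c (positive = timeout, 0 = error or descriptor done, negative = invalid channel or nothing pending). The function name and the surrounding driver are hypothetical, not part of this commit.

	/* Hypothetical consumer, e.g. a UART driver's per-channel IRQ path. */
	#include <linux/dma/hsu.h>
	#include <linux/interrupt.h>
	#include <linux/types.h>

	static irqreturn_t my_uart_hsu_dma_irq(struct hsu_dma_chip *chip,
						unsigned short nr)
	{
		u32 status;
		int ret;

		/*
		 * Read and clear the channel status register. A negative
		 * return means the channel number was invalid or no
		 * interrupt is pending on this channel.
		 */
		ret = hsu_dma_get_status(chip, nr, &status);
		if (ret < 0)
			return IRQ_NONE;

		/*
		 * Positive return: timeout interrupt. The transfer stalled
		 * mid-descriptor, so the UART driver would deal with the
		 * partially filled buffer here rather than in the DMA core.
		 */
		if (ret > 0)
			return IRQ_HANDLED;

		/* Zero: channel error or descriptor done; let the core handle it. */
		return hsu_dma_do_irq(chip, nr, status) ? IRQ_HANDLED : IRQ_NONE;
	}

This mirrors the dispatch loop in hsu_pci_irq() for the PCI-enumerated case, where each set bit in HSU_PCI_DMAISR selects a channel number to be handled the same way.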