author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/pci/msi
parent     Initial commit. (diff)
download   linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
           linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/pci/msi')
-rw-r--r--  drivers/pci/msi/Makefile       6
-rw-r--r--  drivers/pci/msi/api.c        458
-rw-r--r--  drivers/pci/msi/irqdomain.c  485
-rw-r--r--  drivers/pci/msi/legacy.c      80
-rw-r--r--  drivers/pci/msi/msi.c        915
-rw-r--r--  drivers/pci/msi/msi.h        129
-rw-r--r--  drivers/pci/msi/pcidev_msi.c  43
7 files changed, 2116 insertions(+), 0 deletions(-)
diff --git a/drivers/pci/msi/Makefile b/drivers/pci/msi/Makefile
new file mode 100644
index 0000000000..839ff72d72
--- /dev/null
+++ b/drivers/pci/msi/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the PCI/MSI
+obj-$(CONFIG_PCI)			+= pcidev_msi.o
+obj-$(CONFIG_PCI_MSI)			+= api.o msi.o irqdomain.o
+obj-$(CONFIG_PCI_MSI_ARCH_FALLBACKS)	+= legacy.o
diff --git a/drivers/pci/msi/api.c b/drivers/pci/msi/api.c
new file mode 100644
index 0000000000..be679aa5db
--- /dev/null
+++ b/drivers/pci/msi/api.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI MSI/MSI-X — Exported APIs for device drivers
+ *
+ * Copyright (C) 2003-2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Copyright (C) 2016 Christoph Hellwig.
+ * Copyright (C) 2022 Linutronix GmbH
+ */
+
+#include <linux/export.h>
+#include <linux/irq.h>
+
+#include "msi.h"
+
+/**
+ * pci_enable_msi() - Enable MSI interrupt mode on device
+ * @dev: the PCI device to operate on
+ *
+ * Legacy device driver API to enable MSI interrupt mode on the device
+ * and allocate a single interrupt vector. On success, the Linux IRQ of
+ * the allocated vector is saved at @dev->irq. The driver must invoke
+ * pci_disable_msi() on cleanup.
+ *
+ * NOTE: The newer pci_alloc_irq_vectors() / pci_free_irq_vectors() API
+ * pair should, in general, be used instead.
+ *
+ * Return: 0 on success, errno otherwise
+ */
+int pci_enable_msi(struct pci_dev *dev)
+{
+	int rc = __pci_enable_msi_range(dev, 1, 1, NULL);
+	if (rc < 0)
+		return rc;
+	return 0;
+}
+EXPORT_SYMBOL(pci_enable_msi);
+
+/**
+ * pci_disable_msi() - Disable MSI interrupt mode on device
+ * @dev: the PCI device to operate on
+ *
+ * Legacy device driver API to disable MSI interrupt mode on the device,
+ * free earlier allocated interrupt vectors, and restore INTx emulation.
+ * The PCI device Linux IRQ (@dev->irq) is restored to its default
+ * pin-assertion IRQ. This is the cleanup pair of pci_enable_msi().
+ *
+ * NOTE: The newer pci_alloc_irq_vectors() / pci_free_irq_vectors() API
+ * pair should, in general, be used instead.
+ */
+void pci_disable_msi(struct pci_dev *dev)
+{
+	if (!pci_msi_enabled() || !dev || !dev->msi_enabled)
+		return;
+
+	msi_lock_descs(&dev->dev);
+	pci_msi_shutdown(dev);
+	pci_free_msi_irqs(dev);
+	msi_unlock_descs(&dev->dev);
+}
+EXPORT_SYMBOL(pci_disable_msi);
+
+/**
+ * pci_msix_vec_count() - Get number of MSI-X interrupt vectors on device
+ * @dev: the PCI device to operate on
+ *
+ * Return: number of MSI-X interrupt vectors available on this device
+ * (i.e., the device's MSI-X capability structure "table size"), -EINVAL
+ * if the device is not MSI-X capable, other errnos otherwise.
+ */
+int pci_msix_vec_count(struct pci_dev *dev)
+{
+	u16 control;
+
+	if (!dev->msix_cap)
+		return -EINVAL;
+
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+	return msix_table_size(control);
+}
+EXPORT_SYMBOL(pci_msix_vec_count);
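/*
 * Usage sketch (hypothetical driver code, not part of this commit): the
 * legacy single-vector flow documented above. The "foo" names and the
 * setup/teardown pairing are assumptions for illustration only.
 */
#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_irq_handler(int irq, void *data)
{
	/* Acknowledge the device interrupt here */
	return IRQ_HANDLED;
}

static int foo_setup_single_irq(struct pci_dev *pdev)
{
	int ret;

	/* Try MSI; fall back to shared INTx when it is unavailable */
	ret = pci_enable_msi(pdev);

	/* pdev->irq holds the MSI vector on success, the INTx IRQ otherwise */
	ret = request_irq(pdev->irq, foo_irq_handler, ret ? IRQF_SHARED : 0,
			  "foo", pdev);
	if (ret && pdev->msi_enabled)
		pci_disable_msi(pdev);
	return ret;
}

static void foo_teardown_single_irq(struct pci_dev *pdev)
{
	free_irq(pdev->irq, pdev);
	if (pdev->msi_enabled)
		pci_disable_msi(pdev);	/* restores the INTx pin IRQ in pdev->irq */
}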
+
+/**
+ * pci_enable_msix_range() - Enable MSI-X interrupt mode on device
+ * @dev: the PCI device to operate on
+ * @entries: input/output parameter, array of MSI-X configuration entries
+ * @minvec: minimum required number of MSI-X vectors
+ * @maxvec: maximum desired number of MSI-X vectors
+ *
+ * Legacy device driver API to enable MSI-X interrupt mode on device and
+ * configure its MSI-X capability structure as appropriate. The passed
+ * @entries array must have each of its members "entry" field set to a
+ * desired (valid) MSI-X vector number, where the range of valid MSI-X
+ * vector numbers can be queried through pci_msix_vec_count(). If
+ * successful, the driver must invoke pci_disable_msix() on cleanup.
+ *
+ * NOTE: The newer pci_alloc_irq_vectors() / pci_free_irq_vectors() API
+ * pair should, in general, be used instead.
+ *
+ * Return: number of MSI-X vectors allocated (which might be smaller
+ * than @maxvec), where Linux IRQ numbers for such allocated vectors
+ * are saved back in the @entries array elements' "vector" field. Return
+ * -ENOSPC if less than @minvec interrupt vectors are available.
+ * Return -EINVAL if one of the passed @entries members "entry" field
+ * was invalid or a duplicate, or if plain MSI interrupts mode was
+ * earlier enabled on device. Return other errnos otherwise.
+ */
+int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
+			  int minvec, int maxvec)
+{
+	return __pci_enable_msix_range(dev, entries, minvec, maxvec, NULL, 0);
+}
+EXPORT_SYMBOL(pci_enable_msix_range);
+
+/**
+ * pci_msix_can_alloc_dyn - Query whether dynamic allocation after enabling
+ *			    MSI-X is supported
+ *
+ * @dev: PCI device to operate on
+ *
+ * Return: True if supported, false otherwise
+ */
+bool pci_msix_can_alloc_dyn(struct pci_dev *dev)
+{
+	if (!dev->msix_cap)
+		return false;
+
+	return pci_msi_domain_supports(dev, MSI_FLAG_PCI_MSIX_ALLOC_DYN, DENY_LEGACY);
+}
+EXPORT_SYMBOL_GPL(pci_msix_can_alloc_dyn);
+
+/**
+ * pci_msix_alloc_irq_at - Allocate an MSI-X interrupt after enabling MSI-X
+ *			   at a given MSI-X vector index or any free vector index
+ *
+ * @dev: PCI device to operate on
+ * @index: Index to allocate. If @index == MSI_ANY_INDEX this allocates
+ *	   the next free index in the MSI-X table
+ * @affdesc: Optional pointer to an affinity descriptor structure. NULL otherwise
+ *
+ * Return: A struct msi_map
+ *
+ * On success msi_map::index contains the allocated index (>= 0) and
+ * msi_map::virq contains the allocated Linux interrupt number (> 0).
+ *
+ * On failure msi_map::index contains the error code and msi_map::virq
+ * is set to 0.
+ */
+struct msi_map pci_msix_alloc_irq_at(struct pci_dev *dev, unsigned int index,
+				     const struct irq_affinity_desc *affdesc)
+{
+	struct msi_map map = { .index = -ENOTSUPP };
+
+	if (!dev->msix_enabled)
+		return map;
+
+	if (!pci_msix_can_alloc_dyn(dev))
+		return map;
+
+	return msi_domain_alloc_irq_at(&dev->dev, MSI_DEFAULT_DOMAIN, index, affdesc, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_msix_alloc_irq_at);
+
+/**
+ * pci_msix_free_irq - Free an interrupt on a PCI/MSIX interrupt domain
+ *
+ * @dev: The PCI device to operate on
+ * @map: A struct msi_map describing the interrupt to free
+ *
+ * Undo an interrupt vector allocation. Does not disable MSI-X.
+ */
+void pci_msix_free_irq(struct pci_dev *dev, struct msi_map map)
+{
+	if (WARN_ON_ONCE(map.index < 0 || map.virq <= 0))
+		return;
+	if (WARN_ON_ONCE(!pci_msix_can_alloc_dyn(dev)))
+		return;
+	msi_domain_free_irqs_range(&dev->dev, MSI_DEFAULT_DOMAIN, map.index, map.index);
+}
+EXPORT_SYMBOL_GPL(pci_msix_free_irq);
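/*
 * Usage sketch (hypothetical driver code, not part of this commit):
 * growing the MSI-X vector set at runtime with the dynamic allocation
 * API above. Assumes MSI-X was already enabled, e.g. through
 * pci_alloc_irq_vectors(..., PCI_IRQ_MSIX).
 */
static int foo_add_queue_irq(struct pci_dev *pdev, struct msi_map *map)
{
	if (!pci_msix_can_alloc_dyn(pdev))
		return -EOPNOTSUPP;

	/* Pick the next free slot in the MSI-X table */
	*map = pci_msix_alloc_irq_at(pdev, MSI_ANY_INDEX, NULL);
	if (map->index < 0)
		return map->index;

	/* map->virq is now an ordinary Linux IRQ number */
	return 0;
}

static void foo_del_queue_irq(struct pci_dev *pdev, struct msi_map map)
{
	pci_msix_free_irq(pdev, map);	/* leaves MSI-X itself enabled */
}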
+
+/**
+ * pci_disable_msix() - Disable MSI-X interrupt mode on device
+ * @dev: the PCI device to operate on
+ *
+ * Legacy device driver API to disable MSI-X interrupt mode on device,
+ * free earlier-allocated interrupt vectors, and restore INTx.
+ * The PCI device Linux IRQ (@dev->irq) is restored to its default pin
+ * assertion IRQ. This is the cleanup pair of pci_enable_msix_range().
+ *
+ * NOTE: The newer pci_alloc_irq_vectors() / pci_free_irq_vectors() API
+ * pair should, in general, be used instead.
+ */
+void pci_disable_msix(struct pci_dev *dev)
+{
+	if (!pci_msi_enabled() || !dev || !dev->msix_enabled)
+		return;
+
+	msi_lock_descs(&dev->dev);
+	pci_msix_shutdown(dev);
+	pci_free_msi_irqs(dev);
+	msi_unlock_descs(&dev->dev);
+}
+EXPORT_SYMBOL(pci_disable_msix);
+
+/**
+ * pci_alloc_irq_vectors() - Allocate multiple device interrupt vectors
+ * @dev: the PCI device to operate on
+ * @min_vecs: minimum required number of vectors (must be >= 1)
+ * @max_vecs: maximum desired number of vectors
+ * @flags: One or more of:
+ *
+ *         * %PCI_IRQ_MSIX      Allow trying MSI-X vector allocations
+ *         * %PCI_IRQ_MSI       Allow trying MSI vector allocations
+ *
+ *         * %PCI_IRQ_LEGACY    Allow trying legacy INTx interrupts, if
+ *           and only if @min_vecs == 1
+ *
+ *         * %PCI_IRQ_AFFINITY  Auto-manage IRQs affinity by spreading
+ *           the vectors around available CPUs
+ *
+ * Allocate up to @max_vecs interrupt vectors on device. MSI-X irq
+ * vector allocation has a higher precedence over plain MSI, which has a
+ * higher precedence over legacy INTx emulation.
+ *
+ * Upon a successful allocation, the caller should use pci_irq_vector()
+ * to get the Linux IRQ number to be passed to request_threaded_irq().
+ * The driver must call pci_free_irq_vectors() on cleanup.
+ *
+ * Return: number of allocated vectors (which might be smaller than
+ * @max_vecs), -ENOSPC if less than @min_vecs interrupt vectors are
+ * available, other errnos otherwise.
+ */
+int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
+			  unsigned int max_vecs, unsigned int flags)
+{
+	return pci_alloc_irq_vectors_affinity(dev, min_vecs, max_vecs,
+					      flags, NULL);
+}
+EXPORT_SYMBOL(pci_alloc_irq_vectors);
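/*
 * Usage sketch (hypothetical driver code, not part of this commit): the
 * recommended modern flow built on pci_alloc_irq_vectors(). The queue
 * count and foo_irq_handler() (from the earlier sketch) are assumptions.
 */
static int foo_setup_irqs(struct pci_dev *pdev)
{
	int i, err, nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		err = request_irq(pci_irq_vector(pdev, i), foo_irq_handler,
				  0, "foo", pdev);
		if (err)
			goto err_free;
	}
	return 0;

err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), pdev);
	pci_free_irq_vectors(pdev);
	return err;
}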
+
+/**
+ * pci_alloc_irq_vectors_affinity() - Allocate multiple device interrupt
+ *                                    vectors with affinity requirements
+ * @dev: the PCI device to operate on
+ * @min_vecs: minimum required number of vectors (must be >= 1)
+ * @max_vecs: maximum desired number of vectors
+ * @flags: allocation flags, as in pci_alloc_irq_vectors()
+ * @affd: affinity requirements (can be %NULL).
+ *
+ * Same as pci_alloc_irq_vectors(), but with the extra @affd parameter.
+ * Check that function docs, and &struct irq_affinity, for more details.
+ */
+int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
+				   unsigned int max_vecs, unsigned int flags,
+				   struct irq_affinity *affd)
+{
+	struct irq_affinity msi_default_affd = {0};
+	int nvecs = -ENOSPC;
+
+	if (flags & PCI_IRQ_AFFINITY) {
+		if (!affd)
+			affd = &msi_default_affd;
+	} else {
+		if (WARN_ON(affd))
+			affd = NULL;
+	}
+
+	if (flags & PCI_IRQ_MSIX) {
+		nvecs = __pci_enable_msix_range(dev, NULL, min_vecs, max_vecs,
+						affd, flags);
+		if (nvecs > 0)
+			return nvecs;
+	}
+
+	if (flags & PCI_IRQ_MSI) {
+		nvecs = __pci_enable_msi_range(dev, min_vecs, max_vecs, affd);
+		if (nvecs > 0)
+			return nvecs;
+	}
+
+	/* use legacy IRQ if allowed */
+	if (flags & PCI_IRQ_LEGACY) {
+		if (min_vecs == 1 && dev->irq) {
+			/*
+			 * Invoke the affinity spreading logic to ensure that
+			 * the device driver can adjust queue configuration
+			 * for the single interrupt case.
+			 */
+			if (affd)
+				irq_create_affinity_masks(1, affd);
+			pci_intx(dev, 1);
+			return 1;
+		}
+	}
+
+	return nvecs;
+}
+EXPORT_SYMBOL(pci_alloc_irq_vectors_affinity);
+
+/**
+ * pci_irq_vector() - Get Linux IRQ number of a device interrupt vector
+ * @dev: the PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based); has different
+ *      meanings, depending on interrupt mode:
+ *
+ *        * MSI-X     the index in the MSI-X vector table
+ *        * MSI       the index of the enabled MSI vectors
+ *        * INTx      must be 0
+ *
+ * Return: the Linux IRQ number, or -EINVAL if @nr is out of range
+ */
+int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
+{
+	unsigned int irq;
+
+	if (!dev->msi_enabled && !dev->msix_enabled)
+		return !nr ? dev->irq : -EINVAL;
+
+	irq = msi_get_virq(&dev->dev, nr);
+	return irq ? irq : -EINVAL;
+}
+EXPORT_SYMBOL(pci_irq_vector);
+
+/**
+ * pci_irq_get_affinity() - Get a device interrupt vector affinity
+ * @dev: the PCI device to operate on
+ * @nr: device-relative interrupt vector index (0-based); has different
+ *      meanings, depending on interrupt mode:
+ *
+ *        * MSI-X     the index in the MSI-X vector table
+ *        * MSI       the index of the enabled MSI vectors
+ *        * INTx      must be 0
+ *
+ * Return: MSI/MSI-X vector affinity, NULL if @nr is out of range or if
+ * the MSI(-X) vector was allocated without explicit affinity
+ * requirements (e.g., by pci_enable_msi(), pci_enable_msix_range(), or
+ * pci_alloc_irq_vectors() without the %PCI_IRQ_AFFINITY flag). Return a
+ * generic set of CPU IDs representing all possible CPUs available
+ * during system boot if the device is in legacy INTx mode.
+ */
+const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
+{
+	int idx, irq = pci_irq_vector(dev, nr);
+	struct msi_desc *desc;
+
+	if (WARN_ON_ONCE(irq <= 0))
+		return NULL;
+
+	desc = irq_get_msi_desc(irq);
+	/* Non-MSI does not have the information handy */
+	if (!desc)
+		return cpu_possible_mask;
+
+	/* MSI[X] interrupts can be allocated without affinity descriptor */
+	if (!desc->affinity)
+		return NULL;
+
+	/*
+	 * MSI has a mask array in the descriptor.
+	 * MSI-X has a single mask.
+	 */
+	idx = dev->msi_enabled ? nr : 0;
+	return &desc->affinity[idx].mask;
+}
+EXPORT_SYMBOL(pci_irq_get_affinity);
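/*
 * Usage sketch (hypothetical driver code, not part of this commit):
 * deriving a queue-to-CPU mapping from managed affinity, as a driver
 * might do after pci_alloc_irq_vectors(..., PCI_IRQ_AFFINITY).
 */
static void foo_log_queue_mapping(struct pci_dev *pdev, unsigned int nr_queues)
{
	const struct cpumask *mask;
	unsigned int q;

	for (q = 0; q < nr_queues; q++) {
		mask = pci_irq_get_affinity(pdev, q);
		if (!mask)
			continue;	/* vector allocated without affinity */
		dev_dbg(&pdev->dev, "queue %u -> IRQ %d on CPUs %*pbl\n",
			q, pci_irq_vector(pdev, q), cpumask_pr_args(mask));
	}
}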
+
+/**
+ * pci_ims_alloc_irq - Allocate an interrupt on a PCI/IMS interrupt domain
+ * @dev: The PCI device to operate on
+ * @icookie: Pointer to an IMS implementation specific cookie for this
+ *	     IMS instance (PASID, queue ID, pointer...).
+ *	     The cookie content is copied into the MSI descriptor for the
+ *	     interrupt chip callbacks or domain specific setup functions.
+ * @affdesc: Optional pointer to an interrupt affinity descriptor
+ *
+ * There is no index for IMS allocations as IMS is an implementation
+ * specific storage and does not have any direct associations between
+ * index, which might be a pure software construct, and device
+ * functionality. This association is established by the driver either via
+ * the index - if there is a hardware table - or in case of purely software
+ * managed IMS implementation the association happens via the
+ * irq_write_msi_msg() callback of the implementation specific interrupt
+ * chip, which utilizes the provided @icookie to store the MSI message in
+ * the appropriate place.
+ *
+ * Return: A struct msi_map
+ *
+ * On success msi_map::index contains the allocated index (>= 0) and
+ * msi_map::virq the allocated Linux interrupt number (> 0).
+ *
+ * On failure msi_map::index contains the error code and msi_map::virq
+ * is set to 0.
+ */
+struct msi_map pci_ims_alloc_irq(struct pci_dev *dev, union msi_instance_cookie *icookie,
+				 const struct irq_affinity_desc *affdesc)
+{
+	return msi_domain_alloc_irq_at(&dev->dev, MSI_SECONDARY_DOMAIN, MSI_ANY_INDEX,
+				       affdesc, icookie);
+}
+EXPORT_SYMBOL_GPL(pci_ims_alloc_irq);
+
+/**
+ * pci_ims_free_irq - Free an interrupt on a PCI/IMS interrupt domain
+ *		      which was allocated via pci_ims_alloc_irq()
+ * @dev: The PCI device to operate on
+ * @map: A struct msi_map describing the interrupt to free as
+ *	 returned from pci_ims_alloc_irq()
+ */
+void pci_ims_free_irq(struct pci_dev *dev, struct msi_map map)
+{
+	if (WARN_ON_ONCE(map.index < 0 || map.virq <= 0))
+		return;
+	msi_domain_free_irqs_range(&dev->dev, MSI_SECONDARY_DOMAIN, map.index, map.index);
+}
+EXPORT_SYMBOL_GPL(pci_ims_free_irq);
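/*
 * Usage sketch (hypothetical driver code, not part of this commit):
 * allocating an IMS interrupt carrying an implementation-defined cookie.
 * Assumes the driver created an IMS domain beforehand with
 * pci_create_ims_domain() (see irqdomain.c below); using a PASID as the
 * cookie is purely illustrative.
 */
static int foo_alloc_ims_irq(struct pci_dev *pdev, u32 pasid, struct msi_map *map)
{
	union msi_instance_cookie icookie = { .value = pasid };

	*map = pci_ims_alloc_irq(pdev, &icookie, NULL);
	if (map->index < 0)
		return map->index;

	return 0;	/* map->virq is ready for request_irq() */
}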
+
+/**
+ * pci_free_irq_vectors() - Free previously allocated IRQs for a device
+ * @dev: the PCI device to operate on
+ *
+ * Undo the interrupt vector allocations and possible device MSI/MSI-X
+ * enablement earlier done through pci_alloc_irq_vectors_affinity() or
+ * pci_alloc_irq_vectors().
+ */
+void pci_free_irq_vectors(struct pci_dev *dev)
+{
+	pci_disable_msix(dev);
+	pci_disable_msi(dev);
+}
+EXPORT_SYMBOL(pci_free_irq_vectors);
+
+/**
+ * pci_restore_msi_state() - Restore cached MSI(-X) state on device
+ * @dev: the PCI device to operate on
+ *
+ * Write the Linux-cached MSI(-X) state back on device. This is
+ * typically useful upon system resume, or after an error-recovery PCI
+ * adapter reset.
+ */
+void pci_restore_msi_state(struct pci_dev *dev)
+{
+	__pci_restore_msi_state(dev);
+	__pci_restore_msix_state(dev);
+}
+EXPORT_SYMBOL_GPL(pci_restore_msi_state);
+
+/**
+ * pci_msi_enabled() - Are MSI(-X) interrupts enabled system-wide?
+ *
+ * Return: true if MSI has not been globally disabled through ACPI FADT,
+ * PCI bridge quirks, or the "pci=nomsi" kernel command-line option.
+ */
+int pci_msi_enabled(void)
+{
+	return pci_msi_enable;
+}
+EXPORT_SYMBOL(pci_msi_enabled);
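/*
 * Usage sketch (hypothetical code, not part of this commit): the global
 * kill switch above is typically consulted before any MSI setup is
 * attempted, together with the per-device opt-out.
 */
static bool foo_want_msi(struct pci_dev *pdev)
{
	/* Honour "pci=nomsi", platform disablement and device quirks */
	return pci_msi_enabled() && !pdev->no_msi;
}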
diff --git a/drivers/pci/msi/irqdomain.c b/drivers/pci/msi/irqdomain.c
new file mode 100644
index 0000000000..c8be056c24
--- /dev/null
+++ b/drivers/pci/msi/irqdomain.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Message Signaled Interrupt (MSI) - irqdomain support
+ */
+#include <linux/acpi_iort.h>
+#include <linux/irqdomain.h>
+#include <linux/of_irq.h>
+
+#include "msi.h"
+
+int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	struct irq_domain *domain;
+
+	domain = dev_get_msi_domain(&dev->dev);
+	if (domain && irq_domain_is_hierarchy(domain))
+		return msi_domain_alloc_irqs_all_locked(&dev->dev, MSI_DEFAULT_DOMAIN, nvec);
+
+	return pci_msi_legacy_setup_msi_irqs(dev, nvec, type);
+}
+
+void pci_msi_teardown_msi_irqs(struct pci_dev *dev)
+{
+	struct irq_domain *domain;
+
+	domain = dev_get_msi_domain(&dev->dev);
+	if (domain && irq_domain_is_hierarchy(domain)) {
+		msi_domain_free_irqs_all_locked(&dev->dev, MSI_DEFAULT_DOMAIN);
+	} else {
+		pci_msi_legacy_teardown_msi_irqs(dev);
+		msi_free_msi_descs(&dev->dev);
+	}
+}
+
+/**
+ * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space
+ * @irq_data: Pointer to interrupt data of the MSI interrupt
+ * @msg: Pointer to the message
+ */
+static void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg)
+{
+	struct msi_desc *desc = irq_data_get_msi_desc(irq_data);
+
+	/*
+	 * For MSI-X desc->irq is always equal to irq_data->irq. For
+	 * MSI only the first interrupt of MULTI MSI passes the test.
+	 */
+	if (desc->irq == irq_data->irq)
+		__pci_write_msi_msg(desc, msg);
+}
+
+/**
+ * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source
+ * @desc: Pointer to the MSI descriptor
+ *
+ * The ID number is only used within the irqdomain.
+ */
+static irq_hw_number_t pci_msi_domain_calc_hwirq(struct msi_desc *desc)
+{
+	struct pci_dev *dev = msi_desc_to_pci_dev(desc);
+
+	return (irq_hw_number_t)desc->msi_index |
+		pci_dev_id(dev) << 11 |
+		(pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27;
+}
+
+static void pci_msi_domain_set_desc(msi_alloc_info_t *arg,
+				    struct msi_desc *desc)
+{
+	arg->desc = desc;
+	arg->hwirq = pci_msi_domain_calc_hwirq(desc);
+}
+
+static struct msi_domain_ops pci_msi_domain_ops_default = {
+	.set_desc	= pci_msi_domain_set_desc,
+};
+
+static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info)
+{
+	struct msi_domain_ops *ops = info->ops;
+
+	if (ops == NULL) {
+		info->ops = &pci_msi_domain_ops_default;
+	} else {
+		if (ops->set_desc == NULL)
+			ops->set_desc = pci_msi_domain_set_desc;
+	}
+}
+
+static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info)
+{
+	struct irq_chip *chip = info->chip;
+
+	BUG_ON(!chip);
+	if (!chip->irq_write_msi_msg)
+		chip->irq_write_msi_msg = pci_msi_domain_write_msg;
+	if (!chip->irq_mask)
+		chip->irq_mask = pci_msi_mask_irq;
+	if (!chip->irq_unmask)
+		chip->irq_unmask = pci_msi_unmask_irq;
+}
+
+/**
+ * pci_msi_create_irq_domain - Create a MSI interrupt domain
+ * @fwnode: Optional fwnode of the interrupt controller
+ * @info: MSI domain info
+ * @parent: Parent irq domain
+ *
+ * Updates the domain and chip ops and creates a MSI interrupt domain.
+ *
+ * Returns:
+ * A domain pointer or NULL in case of failure.
+ */
+struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
+					     struct msi_domain_info *info,
+					     struct irq_domain *parent)
+{
+	if (WARN_ON(info->flags & MSI_FLAG_LEVEL_CAPABLE))
+		info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
+
+	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
+		pci_msi_domain_update_dom_ops(info);
+	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
+		pci_msi_domain_update_chip_ops(info);
+
+	/* Let the core code free MSI descriptors when freeing interrupts */
+	info->flags |= MSI_FLAG_FREE_MSI_DESCS;
+
+	info->flags |= MSI_FLAG_ACTIVATE_EARLY | MSI_FLAG_DEV_SYSFS;
+	if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE))
+		info->flags |= MSI_FLAG_MUST_REACTIVATE;
+
+	/* PCI-MSI is oneshot-safe */
+	info->chip->flags |= IRQCHIP_ONESHOT_SAFE;
+	/* Let the core update the bus token */
+	info->bus_token = DOMAIN_BUS_PCI_MSI;
+
+	return msi_create_irq_domain(fwnode, info, parent);
+}
+EXPORT_SYMBOL_GPL(pci_msi_create_irq_domain);
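/*
 * Usage sketch (hypothetical irqchip code, not part of this commit): how
 * an interrupt controller driver typically instantiates a global PCI/MSI
 * domain on top of its parent domain. The "foo" chip is an assumption;
 * real drivers add hardware-specific callbacks and flags.
 */
static struct irq_chip foo_pci_msi_chip = {
	.name		= "foo-MSI",
	.irq_mask	= pci_msi_mask_irq,
	.irq_unmask	= pci_msi_unmask_irq,
};

static struct msi_domain_info foo_pci_msi_domain_info = {
	.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
		  MSI_FLAG_PCI_MSIX,
	.chip	= &foo_pci_msi_chip,
};

static struct irq_domain *foo_create_pci_msi_domain(struct fwnode_handle *fwnode,
						    struct irq_domain *parent)
{
	/* The default dom/chip ops above fill in the unset callbacks */
	return pci_msi_create_irq_domain(fwnode, &foo_pci_msi_domain_info, parent);
}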
+
+/*
+ * Per device MSI[-X] domain functionality
+ */
+static void pci_device_domain_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+	arg->desc = desc;
+	arg->hwirq = desc->msi_index;
+}
+
+static void pci_irq_mask_msi(struct irq_data *data)
+{
+	struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+	pci_msi_mask(desc, BIT(data->irq - desc->irq));
+}
+
+static void pci_irq_unmask_msi(struct irq_data *data)
+{
+	struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+	pci_msi_unmask(desc, BIT(data->irq - desc->irq));
+}
+
+#ifdef CONFIG_GENERIC_IRQ_RESERVATION_MODE
+# define MSI_REACTIVATE		MSI_FLAG_MUST_REACTIVATE
+#else
+# define MSI_REACTIVATE		0
+#endif
+
+#define MSI_COMMON_FLAGS	(MSI_FLAG_FREE_MSI_DESCS |	\
+				 MSI_FLAG_ACTIVATE_EARLY |	\
+				 MSI_FLAG_DEV_SYSFS |		\
+				 MSI_REACTIVATE)
+
+static const struct msi_domain_template pci_msi_template = {
+	.chip = {
+		.name			= "PCI-MSI",
+		.irq_mask		= pci_irq_mask_msi,
+		.irq_unmask		= pci_irq_unmask_msi,
+		.irq_write_msi_msg	= pci_msi_domain_write_msg,
+		.flags			= IRQCHIP_ONESHOT_SAFE,
+	},
+
+	.ops = {
+		.set_desc		= pci_device_domain_set_desc,
+	},
+
+	.info = {
+		.flags			= MSI_COMMON_FLAGS | MSI_FLAG_MULTI_PCI_MSI,
+		.bus_token		= DOMAIN_BUS_PCI_DEVICE_MSI,
+	},
+};
+
+static void pci_irq_mask_msix(struct irq_data *data)
+{
+	pci_msix_mask(irq_data_get_msi_desc(data));
+}
+
+static void pci_irq_unmask_msix(struct irq_data *data)
+{
+	pci_msix_unmask(irq_data_get_msi_desc(data));
+}
+
+static void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
+				  struct msi_desc *desc)
+{
+	/* Don't fiddle with preallocated MSI descriptors */
+	if (!desc->pci.mask_base)
+		msix_prepare_msi_desc(to_pci_dev(desc->dev), desc);
+}
+
+static const struct msi_domain_template pci_msix_template = {
+	.chip = {
+		.name			= "PCI-MSIX",
+		.irq_mask		= pci_irq_mask_msix,
+		.irq_unmask		= pci_irq_unmask_msix,
+		.irq_write_msi_msg	= pci_msi_domain_write_msg,
+		.flags			= IRQCHIP_ONESHOT_SAFE,
+	},
+
+	.ops = {
+		.prepare_desc		= pci_msix_prepare_desc,
+		.set_desc		= pci_device_domain_set_desc,
+	},
+
+	.info = {
+		.flags			= MSI_COMMON_FLAGS | MSI_FLAG_PCI_MSIX |
+					  MSI_FLAG_PCI_MSIX_ALLOC_DYN,
+		.bus_token		= DOMAIN_BUS_PCI_DEVICE_MSIX,
+	},
+};
+
+static bool pci_match_device_domain(struct pci_dev *pdev, enum irq_domain_bus_token bus_token)
+{
+	return msi_match_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN, bus_token);
+}
+
+static bool pci_create_device_domain(struct pci_dev *pdev, const struct msi_domain_template *tmpl,
+				     unsigned int hwsize)
+{
+	struct irq_domain *domain = dev_get_msi_domain(&pdev->dev);
+
+	if (!domain || !irq_domain_is_msi_parent(domain))
+		return true;
+
+	return msi_create_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN, tmpl,
+					    hwsize, NULL, NULL);
+}
+
+/**
+ * pci_setup_msi_device_domain - Setup a device MSI interrupt domain
+ * @pdev: The PCI device to create the domain on
+ *
+ * Return:
+ *  True when:
+ *	- The device does not have a MSI parent irq domain associated,
+ *	  which keeps the legacy architecture specific and the global
+ *	  PCI/MSI domain models working
+ *	- The MSI domain exists already
+ *	- The MSI domain was successfully allocated
+ *  False when:
+ *	- MSI-X is enabled
+ *	- The domain creation fails.
+ *
+ * The created MSI domain is preserved until:
+ *	- The device is removed
+ *	- MSI is disabled and a MSI-X domain is created
+ */
+bool pci_setup_msi_device_domain(struct pci_dev *pdev)
+{
+	if (WARN_ON_ONCE(pdev->msix_enabled))
+		return false;
+
+	if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSI))
+		return true;
+	if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX))
+		msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN);
+
+	return pci_create_device_domain(pdev, &pci_msi_template, 1);
+}
+
+/**
+ * pci_setup_msix_device_domain - Setup a device MSI-X interrupt domain
+ * @pdev: The PCI device to create the domain on
+ * @hwsize: The size of the MSI-X vector table
+ *
+ * Return:
+ *  True when:
+ *	- The device does not have a MSI parent irq domain associated,
+ *	  which keeps the legacy architecture specific and the global
+ *	  PCI/MSI domain models working
+ *	- The MSI-X domain exists already
+ *	- The MSI-X domain was successfully allocated
+ *  False when:
+ *	- MSI is enabled
+ *	- The domain creation fails.
+ *
+ * The created MSI-X domain is preserved until:
+ *	- The device is removed
+ *	- MSI-X is disabled and a MSI domain is created
+ */
+bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize)
+{
+	if (WARN_ON_ONCE(pdev->msi_enabled))
+		return false;
+
+	if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSIX))
+		return true;
+	if (pci_match_device_domain(pdev, DOMAIN_BUS_PCI_DEVICE_MSI))
+		msi_remove_device_irq_domain(&pdev->dev, MSI_DEFAULT_DOMAIN);
+
+	return pci_create_device_domain(pdev, &pci_msix_template, hwsize);
+}
+
+/**
+ * pci_msi_domain_supports - Check for support of a particular feature flag
+ * @pdev: The PCI device to operate on
+ * @feature_mask: The feature mask to check for (full match)
+ * @mode: If ALLOW_LEGACY this grants the feature when there is no irq domain
+ *	  associated to the device. If DENY_LEGACY the lack of an irq domain
+ *	  makes the feature unsupported
+ */
+bool pci_msi_domain_supports(struct pci_dev *pdev, unsigned int feature_mask,
+			     enum support_mode mode)
+{
+	struct msi_domain_info *info;
+	struct irq_domain *domain;
+	unsigned int supported;
+
+	domain = dev_get_msi_domain(&pdev->dev);
+
+	if (!domain || !irq_domain_is_hierarchy(domain))
+		return mode == ALLOW_LEGACY;
+
+	if (!irq_domain_is_msi_parent(domain)) {
+		/*
+		 * For "global" PCI/MSI interrupt domains the associated
+		 * msi_domain_info::flags is the authoritative source of
+		 * information.
+		 */
+		info = domain->host_data;
+		supported = info->flags;
+	} else {
+		/*
+		 * For MSI parent domains the supported feature set
+		 * is available in the parent ops. This makes checks
+		 * possible before actually instantiating the
+		 * per device domain because the parent is never
+		 * expanding the PCI/MSI functionality.
+		 */
+		supported = domain->msi_parent_ops->supported_flags;
+	}
+
+	return (supported & feature_mask) == feature_mask;
+}
+
+/**
+ * pci_create_ims_domain - Create a secondary IMS domain for a PCI device
+ * @pdev: The PCI device to operate on
+ * @template: The MSI info template which describes the domain
+ * @hwsize: The size of the hardware entry table or 0 if the domain
+ *	    is purely software managed
+ * @data: Optional pointer to domain specific data to be stored
+ *	  in msi_domain_info::data
+ *
+ * Return: True on success, false otherwise
+ *
+ * An IMS domain is expected to have the following constraints:
+ *	- The index space is managed by the core code
+ *
+ *	- There is no requirement for consecutive index ranges
+ *
+ *	- The interrupt chip must provide the following callbacks:
+ *		- irq_mask()
+ *		- irq_unmask()
+ *		- irq_write_msi_msg()
+ *
+ *	- The interrupt chip must provide the following optional callbacks
+ *	  when the irq_mask(), irq_unmask() and irq_write_msi_msg() callbacks
+ *	  cannot operate directly on hardware, e.g. in the case that the
+ *	  interrupt message store is in queue memory:
+ *		- irq_bus_lock()
+ *		- irq_bus_unlock()
+ *
+ *	  These callbacks are invoked from preemptible task context and are
+ *	  allowed to sleep. In this case the mandatory callbacks above just
+ *	  store the information. The irq_bus_unlock() callback is supposed
+ *	  to make the change effective before returning.
+ *
+ *	- Interrupt affinity setting is handled by the underlying parent
+ *	  interrupt domain and communicated to the IMS domain via
+ *	  irq_write_msi_msg().
+ *
+ * The domain is automatically destroyed when the PCI device is removed.
+ */
+bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
+			   unsigned int hwsize, void *data)
+{
+	struct irq_domain *domain = dev_get_msi_domain(&pdev->dev);
+
+	if (!domain || !irq_domain_is_msi_parent(domain))
+		return false;
+
+	if (template->info.bus_token != DOMAIN_BUS_PCI_DEVICE_IMS ||
+	    !(template->info.flags & MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS) ||
+	    !(template->info.flags & MSI_FLAG_FREE_MSI_DESCS) ||
+	    !template->chip.irq_mask || !template->chip.irq_unmask ||
+	    !template->chip.irq_write_msi_msg || template->chip.irq_set_affinity)
+		return false;
+
+	return msi_create_device_irq_domain(&pdev->dev, MSI_SECONDARY_DOMAIN, template,
+					    hwsize, data, NULL);
+}
+EXPORT_SYMBOL_GPL(pci_create_ims_domain);
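/*
 * Usage sketch (hypothetical device driver code, not part of this
 * commit): an IMS template that satisfies the constraints checked by
 * pci_create_ims_domain() above, for a purely software-managed message
 * store (hwsize == 0). The foo_ims_* callbacks would program
 * device-specific storage and are stubbed here.
 */
static void foo_ims_mask(struct irq_data *data)
{
	/* Set the per-interrupt mask bit in device context */
}

static void foo_ims_unmask(struct irq_data *data)
{
	/* Clear the per-interrupt mask bit in device context */
}

static void foo_ims_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	/* Store msg->address_lo/address_hi/data in device context */
}

static const struct msi_domain_template foo_ims_template = {
	.chip = {
		.name			= "foo-IMS",
		.irq_mask		= foo_ims_mask,
		.irq_unmask		= foo_ims_unmask,
		.irq_write_msi_msg	= foo_ims_write_msg,
	},
	.info = {
		.flags		= MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS |
				  MSI_FLAG_FREE_MSI_DESCS,
		.bus_token	= DOMAIN_BUS_PCI_DEVICE_IMS,
	},
};

/* Called once at probe time, before any pci_ims_alloc_irq() */
static bool foo_setup_ims(struct pci_dev *pdev)
{
	return pci_create_ims_domain(pdev, &foo_ims_template, 0, NULL);
}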
+
+/*
+ * Users of the generic MSI infrastructure expect a device to have a single ID,
+ * so with DMA aliases we have to pick the least-worst compromise. Devices with
+ * DMA phantom functions tend to still emit MSIs from the real function number,
+ * so we ignore those and only consider topological aliases where either the
+ * alias device or RID appears on a different bus number. We also make the
+ * reasonable assumption that bridges are walked in an upstream direction (so
+ * the last one seen wins), and the much braver assumption that the most likely
+ * case is that of PCI->PCIe so we should always use the alias RID. This echoes
+ * the logic from intel_irq_remapping's set_msi_sid(), which presumably works
+ * well enough in practice; in the face of the horrible PCIe<->PCI-X conditions
+ * for taking ownership all we can really do is close our eyes and hope...
+ */
+static int get_msi_id_cb(struct pci_dev *pdev, u16 alias, void *data)
+{
+	u32 *pa = data;
+	u8 bus = PCI_BUS_NUM(*pa);
+
+	if (pdev->bus->number != bus || PCI_BUS_NUM(alias) != bus)
+		*pa = alias;
+
+	return 0;
+}
+
+/**
+ * pci_msi_domain_get_msi_rid - Get the MSI requester id (RID)
+ * @domain: The interrupt domain
+ * @pdev: The PCI device.
+ *
+ * The RID for a device is formed from the alias, with a firmware
+ * supplied mapping applied
+ *
+ * Returns: The RID.
+ */
+u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
+{
+	struct device_node *of_node;
+	u32 rid = pci_dev_id(pdev);
+
+	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+
+	of_node = irq_domain_get_of_node(domain);
+	rid = of_node ? of_msi_map_id(&pdev->dev, of_node, rid) :
+			iort_msi_map_id(&pdev->dev, rid);
+
+	return rid;
+}
+
+/**
+ * pci_msi_get_device_domain - Get the MSI domain for a given PCI device
+ * @pdev: The PCI device
+ *
+ * Use the firmware data to find a device-specific MSI domain
+ * (i.e. not one that is set as a default).
+ *
+ * Returns: The corresponding MSI domain or NULL if none has been found.
+ */
+struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
+{
+	struct irq_domain *dom;
+	u32 rid = pci_dev_id(pdev);
+
+	pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
+	dom = of_msi_map_get_device_domain(&pdev->dev, rid, DOMAIN_BUS_PCI_MSI);
+	if (!dom)
+		dom = iort_get_device_domain(&pdev->dev, rid,
+					     DOMAIN_BUS_PCI_MSI);
+	return dom;
+}
diff --git a/drivers/pci/msi/legacy.c b/drivers/pci/msi/legacy.c
new file mode 100644
index 0000000000..db761adef6
--- /dev/null
+++ b/drivers/pci/msi/legacy.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Message Signaled Interrupt (MSI).
+ *
+ * Legacy architecture specific setup and teardown mechanism.
+ */
+#include "msi.h"
+
+/* Arch hooks */
+int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
+{
+	return -EINVAL;
+}
+
+void __weak arch_teardown_msi_irq(unsigned int irq)
+{
+}
+
+int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	struct msi_desc *desc;
+	int ret;
+
+	/*
+	 * If an architecture wants to support multiple MSI, it needs to
+	 * override arch_setup_msi_irqs()
+	 */
+	if (type == PCI_CAP_ID_MSI && nvec > 1)
+		return 1;
+
+	msi_for_each_desc(desc, &dev->dev, MSI_DESC_NOTASSOCIATED) {
+		ret = arch_setup_msi_irq(dev, desc);
+		if (ret)
+			return ret < 0 ? ret : -ENOSPC;
+	}
+
+	return 0;
+}
+
+void __weak arch_teardown_msi_irqs(struct pci_dev *dev)
+{
+	struct msi_desc *desc;
+	int i;
+
+	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED) {
+		for (i = 0; i < desc->nvec_used; i++)
+			arch_teardown_msi_irq(desc->irq + i);
+	}
+}
+
+static int pci_msi_setup_check_result(struct pci_dev *dev, int type, int ret)
+{
+	struct msi_desc *desc;
+	int avail = 0;
+
+	if (type != PCI_CAP_ID_MSIX || ret >= 0)
+		return ret;
+
+	/* Scan the MSI descriptors for successfully allocated ones. */
+	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ASSOCIATED)
+		avail++;
+
+	return avail ? avail : ret;
+}
+
+int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	int ret = arch_setup_msi_irqs(dev, nvec, type);
+
+	ret = pci_msi_setup_check_result(dev, type, ret);
+	if (!ret)
+		ret = msi_device_populate_sysfs(&dev->dev);
+	return ret;
+}
+
+void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
+{
+	msi_device_destroy_sysfs(&dev->dev);
+	arch_teardown_msi_irqs(dev);
+}
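/*
 * Usage sketch (hypothetical architecture code, not part of this
 * commit): an architecture relying on CONFIG_PCI_MSI_ARCH_FALLBACKS
 * overrides the weak hooks above. foo_arch_compose_msi() stands in for
 * the real vector allocation and message composition; irq_chip setup
 * and error unwinding are omitted.
 */
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	struct msi_msg msg;
	int virq = irq_alloc_desc(NUMA_NO_NODE);	/* allocate a Linux IRQ */

	if (virq < 0)
		return virq;

	foo_arch_compose_msi(dev, virq, &msg);		/* hypothetical helper */
	irq_set_msi_desc(virq, desc);
	pci_write_msi_msg(virq, &msg);
	return 0;
}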
diff --git a/drivers/pci/msi/msi.c b/drivers/pci/msi/msi.c
new file mode 100644
index 0000000000..ef1d8857a5
--- /dev/null
+++ b/drivers/pci/msi/msi.c
@@ -0,0 +1,915 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * PCI Message Signaled Interrupt (MSI)
+ *
+ * Copyright (C) 2003-2004 Intel
+ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
+ * Copyright (C) 2016 Christoph Hellwig.
+ */
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/irq.h>
+
+#include "../pci.h"
+#include "msi.h"
+
+int pci_msi_enable = 1;
+int pci_msi_ignore_mask;
+
+/**
+ * pci_msi_supported - check whether MSI may be enabled on a device
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: how many MSIs have been requested?
+ *
+ * Look at global flags, the device itself, and its parent buses
+ * to determine if MSI/-X are supported for the device. If MSI/-X is
+ * supported return 1, else return 0.
+ **/
+static int pci_msi_supported(struct pci_dev *dev, int nvec)
+{
+	struct pci_bus *bus;
+
+	/* MSI must be globally enabled and supported by the device */
+	if (!pci_msi_enable)
+		return 0;
+
+	if (!dev || dev->no_msi)
+		return 0;
+
+	/*
+	 * You can't ask to have 0 or less MSIs configured.
+	 *  a) it's stupid ..
+	 *  b) the list manipulation code assumes nvec >= 1.
+	 */
+	if (nvec < 1)
+		return 0;
+
+	/*
+	 * Any bridge which does NOT route MSI transactions from its
+	 * secondary bus to its primary bus must set NO_MSI flag on
+	 * the secondary pci_bus.
+	 *
+	 * The NO_MSI flag can either be set directly by:
+	 * - arch-specific PCI host bus controller drivers (deprecated)
+	 * - quirks for specific PCI bridges
+	 *
+	 * or indirectly by platform-specific PCI host bridge drivers by
+	 * advertising the 'msi_domain' property, which results in
+	 * the NO_MSI flag when no MSI domain is found for this bridge
+	 * at probe time.
+	 */
+	for (bus = dev->bus; bus; bus = bus->parent)
+		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
+			return 0;
+
+	return 1;
+}
+
+static void pcim_msi_release(void *pcidev)
+{
+	struct pci_dev *dev = pcidev;
+
+	dev->is_msi_managed = false;
+	pci_free_irq_vectors(dev);
+}
+
+/*
+ * Needs to be separate from pcim_release to prevent an ordering problem
+ * vs. msi_device_data_release() in the MSI core code.
+ */
+static int pcim_setup_msi_release(struct pci_dev *dev)
+{
+	int ret;
+
+	if (!pci_is_managed(dev) || dev->is_msi_managed)
+		return 0;
+
+	ret = devm_add_action(&dev->dev, pcim_msi_release, dev);
+	if (!ret)
+		dev->is_msi_managed = true;
+	return ret;
+}
+
+/*
+ * Ordering vs. devres: msi device data has to be installed first so that
+ * pcim_msi_release() is invoked before it on device release.
+ */
+static int pci_setup_msi_context(struct pci_dev *dev)
+{
+	int ret = msi_setup_device_data(&dev->dev);
+
+	if (!ret)
+		ret = pcim_setup_msi_release(dev);
+	return ret;
+}
+
+/*
+ * Helper functions for mask/unmask and MSI message handling
+ */
+
+void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set)
+{
+	raw_spinlock_t *lock = &to_pci_dev(desc->dev)->msi_lock;
+	unsigned long flags;
+
+	if (!desc->pci.msi_attrib.can_mask)
+		return;
+
+	raw_spin_lock_irqsave(lock, flags);
+	desc->pci.msi_mask &= ~clear;
+	desc->pci.msi_mask |= set;
+	pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->pci.mask_pos,
+			       desc->pci.msi_mask);
+	raw_spin_unlock_irqrestore(lock, flags);
+}
+
+/**
+ * pci_msi_mask_irq - Generic IRQ chip callback to mask PCI/MSI interrupts
+ * @data: pointer to irqdata associated to that interrupt
+ */
+void pci_msi_mask_irq(struct irq_data *data)
+{
+	struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+	__pci_msi_mask_desc(desc, BIT(data->irq - desc->irq));
+}
+EXPORT_SYMBOL_GPL(pci_msi_mask_irq);
+
+/**
+ * pci_msi_unmask_irq - Generic IRQ chip callback to unmask PCI/MSI interrupts
+ * @data: pointer to irqdata associated to that interrupt
+ */
+void pci_msi_unmask_irq(struct irq_data *data)
+{
+	struct msi_desc *desc = irq_data_get_msi_desc(data);
+
+	__pci_msi_unmask_desc(desc, BIT(data->irq - desc->irq));
+}
+EXPORT_SYMBOL_GPL(pci_msi_unmask_irq);
+
+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+{
+	struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+	BUG_ON(dev->current_state != PCI_D0);
+
+	if (entry->pci.msi_attrib.is_msix) {
+		void __iomem *base = pci_msix_desc_addr(entry);
+
+		if (WARN_ON_ONCE(entry->pci.msi_attrib.is_virtual))
+			return;
+
+		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
+		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
+		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
+	} else {
+		int pos = dev->msi_cap;
+		u16 data;
+
+		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
+				      &msg->address_lo);
+		if (entry->pci.msi_attrib.is_64) {
+			pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
+					      &msg->address_hi);
+			pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
+		} else {
+			msg->address_hi = 0;
+			pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
+		}
+		msg->data = data;
+	}
+}
+
+static inline void pci_write_msg_msi(struct pci_dev *dev, struct msi_desc *desc,
+				     struct msi_msg *msg)
+{
+	int pos = dev->msi_cap;
+	u16 msgctl;
+
+	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+	msgctl &= ~PCI_MSI_FLAGS_QSIZE;
+	msgctl |= desc->pci.msi_attrib.multiple << 4;
+	pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);
+
+	pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO, msg->address_lo);
+	if (desc->pci.msi_attrib.is_64) {
+		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI, msg->address_hi);
+		pci_write_config_word(dev, pos + PCI_MSI_DATA_64, msg->data);
+	} else {
+		pci_write_config_word(dev, pos + PCI_MSI_DATA_32, msg->data);
+	}
+	/* Ensure that the writes are visible in the device */
+	pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
+}
+
+static inline void pci_write_msg_msix(struct msi_desc *desc, struct msi_msg *msg)
+{
+	void __iomem *base = pci_msix_desc_addr(desc);
+	u32 ctrl = desc->pci.msix_ctrl;
+	bool unmasked = !(ctrl & PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
+	if (desc->pci.msi_attrib.is_virtual)
+		return;
+	/*
+	 * The specification mandates that the entry is masked
+	 * when the message is modified:
+	 *
+	 * "If software changes the Address or Data value of an
+	 * entry while the entry is unmasked, the result is
+	 * undefined."
+	 */
+	if (unmasked)
+		pci_msix_write_vector_ctrl(desc, ctrl | PCI_MSIX_ENTRY_CTRL_MASKBIT);
+
+	writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
+	writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
+	writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
+
+	if (unmasked)
+		pci_msix_write_vector_ctrl(desc, ctrl);
+
+	/* Ensure that the writes are visible in the device */
+	readl(base + PCI_MSIX_ENTRY_DATA);
+}
+
+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
+{
+	struct pci_dev *dev = msi_desc_to_pci_dev(entry);
+
+	if (dev->current_state != PCI_D0 || pci_dev_is_disconnected(dev)) {
+		/* Don't touch the hardware now */
+	} else if (entry->pci.msi_attrib.is_msix) {
+		pci_write_msg_msix(entry, msg);
+	} else {
+		pci_write_msg_msi(dev, entry, msg);
+	}
+
+	entry->msg = *msg;
+
+	if (entry->write_msi_msg)
+		entry->write_msi_msg(entry, entry->write_msi_msg_data);
+}
+
+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
+{
+	struct msi_desc *entry = irq_get_msi_desc(irq);
+
+	__pci_write_msi_msg(entry, msg);
+}
+EXPORT_SYMBOL_GPL(pci_write_msi_msg);
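/*
 * Usage sketch (hypothetical code, not part of this commit): a parent
 * interrupt controller retargeting a vector and pushing the new message
 * to the device via pci_write_msi_msg(). The doorbell/data encoding is
 * made up; real values are architecture and platform specific.
 */
static void foo_retarget_msi(unsigned int virq, u64 doorbell, u32 vector_data)
{
	struct msi_msg msg = {
		.address_lo	= lower_32_bits(doorbell),
		.address_hi	= upper_32_bits(doorbell),
		.data		= vector_data,
	};

	pci_write_msi_msg(virq, &msg);
}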
+
+
+/* PCI/MSI specific functionality */
+
+static void pci_intx_for_msi(struct pci_dev *dev, int enable)
+{
+	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
+		pci_intx(dev, enable);
+}
+
+static void pci_msi_set_enable(struct pci_dev *dev, int enable)
+{
+	u16 control;
+
+	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+	control &= ~PCI_MSI_FLAGS_ENABLE;
+	if (enable)
+		control |= PCI_MSI_FLAGS_ENABLE;
+	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+static int msi_setup_msi_desc(struct pci_dev *dev, int nvec,
+			      struct irq_affinity_desc *masks)
+{
+	struct msi_desc desc;
+	u16 control;
+
+	/* MSI Entry Initialization */
+	memset(&desc, 0, sizeof(desc));
+
+	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+	/* Lies, damned lies, and MSIs */
+	if (dev->dev_flags & PCI_DEV_FLAGS_HAS_MSI_MASKING)
+		control |= PCI_MSI_FLAGS_MASKBIT;
+	/* Respect XEN's mask disabling */
+	if (pci_msi_ignore_mask)
+		control &= ~PCI_MSI_FLAGS_MASKBIT;
+
+	desc.nvec_used			= nvec;
+	desc.pci.msi_attrib.is_64	= !!(control & PCI_MSI_FLAGS_64BIT);
+	desc.pci.msi_attrib.can_mask	= !!(control & PCI_MSI_FLAGS_MASKBIT);
+	desc.pci.msi_attrib.default_irq	= dev->irq;
+	desc.pci.msi_attrib.multi_cap	= (control & PCI_MSI_FLAGS_QMASK) >> 1;
+	desc.pci.msi_attrib.multiple	= ilog2(__roundup_pow_of_two(nvec));
+	desc.affinity			= masks;
+
+	if (control & PCI_MSI_FLAGS_64BIT)
+		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
+	else
+		desc.pci.mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
+
+	/* Save the initial mask status */
+	if (desc.pci.msi_attrib.can_mask)
+		pci_read_config_dword(dev, desc.pci.mask_pos, &desc.pci.msi_mask);
+
+	return msi_insert_msi_desc(&dev->dev, &desc);
+}
+
+static int msi_verify_entries(struct pci_dev *dev)
+{
+	struct msi_desc *entry;
+
+	if (!dev->no_64bit_msi)
+		return 0;
+
+	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+		if (entry->msg.address_hi) {
+			pci_err(dev, "arch assigned 64-bit MSI address %#x%08x but device only supports 32 bits\n",
+				entry->msg.address_hi, entry->msg.address_lo);
+			break;
+		}
+	}
+	return !entry ? 0 : -EIO;
+}
+
+/**
+ * msi_capability_init - configure device's MSI capability structure
+ * @dev: pointer to the pci_dev data structure of MSI device function
+ * @nvec: number of interrupts to allocate
+ * @affd: description of automatic IRQ affinity assignments (may be %NULL)
+ *
+ * Setup the MSI capability structure of the device with the requested
+ * number of interrupts. A return value of zero indicates the successful
+ * setup of an entry with the new MSI IRQ. A negative return value indicates
+ * an error, and a positive return value indicates the number of interrupts
+ * which could have been allocated.
+ */
+static int msi_capability_init(struct pci_dev *dev, int nvec,
+			       struct irq_affinity *affd)
+{
+	struct irq_affinity_desc *masks = NULL;
+	struct msi_desc *entry;
+	int ret;
+
+	/* Reject multi-MSI early on irq domain enabled architectures */
+	if (nvec > 1 && !pci_msi_domain_supports(dev, MSI_FLAG_MULTI_PCI_MSI, ALLOW_LEGACY))
+		return 1;
+
+	/*
+	 * Disable MSI during setup in the hardware, but mark it enabled
+	 * so that setup code can evaluate it.
+	 */
+	pci_msi_set_enable(dev, 0);
+	dev->msi_enabled = 1;
+
+	if (affd)
+		masks = irq_create_affinity_masks(nvec, affd);
+
+	msi_lock_descs(&dev->dev);
+	ret = msi_setup_msi_desc(dev, nvec, masks);
+	if (ret)
+		goto fail;
+
+	/* All MSIs are unmasked by default; mask them all */
+	entry = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+	pci_msi_mask(entry, msi_multi_mask(entry));
+
+	/* Configure MSI capability structure */
+	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
+	if (ret)
+		goto err;
+
+	ret = msi_verify_entries(dev);
+	if (ret)
+		goto err;
+
+	/* Set MSI enabled bits */
+	pci_intx_for_msi(dev, 0);
+	pci_msi_set_enable(dev, 1);
+
+	pcibios_free_irq(dev);
+	dev->irq = entry->irq;
+	goto unlock;
+
+err:
+	pci_msi_unmask(entry, msi_multi_mask(entry));
+	pci_free_msi_irqs(dev);
+fail:
+	dev->msi_enabled = 0;
+unlock:
+	msi_unlock_descs(&dev->dev);
+	kfree(masks);
+	return ret;
+}
+
+int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
+			   struct irq_affinity *affd)
+{
+	int nvec;
+	int rc;
+
+	if (!pci_msi_supported(dev, minvec) || dev->current_state != PCI_D0)
+		return -EINVAL;
+
+	/* Check whether driver already requested MSI-X IRQs */
+	if (dev->msix_enabled) {
+		pci_info(dev, "can't enable MSI (MSI-X already enabled)\n");
+		return -EINVAL;
+	}
+
+	if (maxvec < minvec)
+		return -ERANGE;
+
+	if (WARN_ON_ONCE(dev->msi_enabled))
+		return -EINVAL;
+
+	nvec = pci_msi_vec_count(dev);
+	if (nvec < 0)
+		return nvec;
+	if (nvec < minvec)
+		return -ENOSPC;
+
+	if (nvec > maxvec)
+		nvec = maxvec;
+
+	rc = pci_setup_msi_context(dev);
+	if (rc)
+		return rc;
+
+	if (!pci_setup_msi_device_domain(dev))
+		return -ENODEV;
+
+	for (;;) {
+		if (affd) {
+			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
+			if (nvec < minvec)
+				return -ENOSPC;
+		}
+
+		rc = msi_capability_init(dev, nvec, affd);
+		if (rc == 0)
+			return nvec;
+
+		if (rc < 0)
+			return rc;
+		if (rc < minvec)
+			return -ENOSPC;
+
+		nvec = rc;
+	}
+}
+
+/**
+ * pci_msi_vec_count - Return the number of MSI vectors a device can send
+ * @dev: device to report about
+ *
+ * This function returns the number of MSI vectors a device requested via
+ * Multiple Message Capable register. It returns a negative errno if the
+ * device is not capable of sending MSI interrupts. Otherwise, the call
+ * succeeds and returns a power of two, up to a maximum of 2^5 (32),
+ * according to the MSI specification.
+ **/
+int pci_msi_vec_count(struct pci_dev *dev)
+{
+	int ret;
+	u16 msgctl;
+
+	if (!dev->msi_cap)
+		return -EINVAL;
+
+	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
+	ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
+
+	return ret;
+}
+EXPORT_SYMBOL(pci_msi_vec_count);
+
+/*
+ * Architecture override returns true when the PCI MSI message should be
+ * written by the generic restore function.
+ */
+bool __weak arch_restore_msi_irqs(struct pci_dev *dev)
+{
+	return true;
+}
+
+void __pci_restore_msi_state(struct pci_dev *dev)
+{
+	struct msi_desc *entry;
+	u16 control;
+
+	if (!dev->msi_enabled)
+		return;
+
+	entry = irq_get_msi_desc(dev->irq);
+
+	pci_intx_for_msi(dev, 0);
+	pci_msi_set_enable(dev, 0);
+	if (arch_restore_msi_irqs(dev))
+		__pci_write_msi_msg(entry, &entry->msg);
+
+	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
+	pci_msi_update_mask(entry, 0, 0);
+	control &= ~PCI_MSI_FLAGS_QSIZE;
+	control |= (entry->pci.msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
+	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
+}
+
+void pci_msi_shutdown(struct pci_dev *dev)
+{
+	struct msi_desc *desc;
+
+	if (!pci_msi_enable || !dev || !dev->msi_enabled)
+		return;
+
+	pci_msi_set_enable(dev, 0);
+	pci_intx_for_msi(dev, 1);
+	dev->msi_enabled = 0;
+
+	/* Return the device with MSI unmasked as initial states */
+	desc = msi_first_desc(&dev->dev, MSI_DESC_ALL);
+	if (!WARN_ON_ONCE(!desc))
+		pci_msi_unmask(desc, msi_multi_mask(desc));
+
+	/* Restore dev->irq to its default pin-assertion IRQ */
+	dev->irq = desc->pci.msi_attrib.default_irq;
+	pcibios_alloc_irq(dev);
+}
+
+/* PCI/MSI-X specific functionality */
+
+static void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set)
+{
+	u16 ctrl;
+
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	ctrl &= ~clear;
+	ctrl |= set;
+	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl);
+}
+
+static void __iomem *msix_map_region(struct pci_dev *dev,
+				     unsigned int nr_entries)
+{
+	resource_size_t phys_addr;
+	u32 table_offset;
+	unsigned long flags;
+	u8 bir;
+
+	pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
+			      &table_offset);
+	bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
+	flags = pci_resource_flags(dev, bir);
+	if (!flags || (flags & IORESOURCE_UNSET))
+		return NULL;
+
+	table_offset &= PCI_MSIX_TABLE_OFFSET;
+	phys_addr = pci_resource_start(dev, bir) + table_offset;
+
+	return ioremap(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+}
+
+/**
+ * msix_prepare_msi_desc - Prepare a half initialized MSI descriptor for operation
+ * @dev: The PCI device for which the descriptor is prepared
+ * @desc: The MSI descriptor for preparation
+ *
+ * This is separate from msix_setup_msi_descs() below to handle dynamic
+ * allocations for MSI-X after initial enablement.
+ *
+ * Ideally the whole MSI-X setup would work that way, but there is no way to
+ * support this for the legacy arch_setup_msi_irqs() mechanism and for the
+ * fake irq domains like the x86 XEN one. Sigh...
+ *
+ * The descriptor is zeroed and only @desc::msi_index and @desc::affinity
+ * are set. When called from msix_setup_msi_descs() then the is_virtual
+ * attribute is initialized as well.
+ *
+ * Fill in the rest.
+ */
+void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc)
+{
+	desc->nvec_used = 1;
+	desc->pci.msi_attrib.is_msix = 1;
+	desc->pci.msi_attrib.is_64 = 1;
+	desc->pci.msi_attrib.default_irq = dev->irq;
+	desc->pci.mask_base = dev->msix_base;
+	desc->pci.msi_attrib.can_mask = !pci_msi_ignore_mask &&
+					!desc->pci.msi_attrib.is_virtual;
+
+	if (desc->pci.msi_attrib.can_mask) {
+		void __iomem *addr = pci_msix_desc_addr(desc);
+
+		desc->pci.msix_ctrl = readl(addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+	}
+}
+
+static int msix_setup_msi_descs(struct pci_dev *dev, struct msix_entry *entries,
+				int nvec, struct irq_affinity_desc *masks)
+{
+	int ret = 0, i, vec_count = pci_msix_vec_count(dev);
+	struct irq_affinity_desc *curmsk;
+	struct msi_desc desc;
+
+	memset(&desc, 0, sizeof(desc));
+
+	for (i = 0, curmsk = masks; i < nvec; i++, curmsk++) {
+		desc.msi_index = entries ? entries[i].entry : i;
+		desc.affinity = masks ? curmsk : NULL;
+		desc.pci.msi_attrib.is_virtual = desc.msi_index >= vec_count;
+
+		msix_prepare_msi_desc(dev, &desc);
+
+		ret = msi_insert_msi_desc(&dev->dev, &desc);
+		if (ret)
+			break;
+	}
+	return ret;
+}
+
+static void msix_update_entries(struct pci_dev *dev, struct msix_entry *entries)
+{
+	struct msi_desc *desc;
+
+	if (entries) {
+		msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL) {
+			entries->vector = desc->irq;
+			entries++;
+		}
+	}
+}
+
+static void msix_mask_all(void __iomem *base, int tsize)
+{
+	u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	int i;
+
+	if (pci_msi_ignore_mask)
+		return;
+
+	for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+		writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
+}
+
+static int msix_setup_interrupts(struct pci_dev *dev, struct msix_entry *entries,
+				 int nvec, struct irq_affinity *affd)
+{
+	struct irq_affinity_desc *masks = NULL;
+	int ret;
+
+	if (affd)
+		masks = irq_create_affinity_masks(nvec, affd);
+
+	msi_lock_descs(&dev->dev);
+	ret = msix_setup_msi_descs(dev, entries, nvec, masks);
+	if (ret)
+		goto out_free;
+
+	ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+	if (ret)
+		goto out_free;
+
+	/* Check if all MSI entries honor device restrictions */
+	ret = msi_verify_entries(dev);
+	if (ret)
+		goto out_free;
+
+	msix_update_entries(dev, entries);
+	goto out_unlock;
+
+out_free:
+	pci_free_msi_irqs(dev);
+out_unlock:
+	msi_unlock_descs(&dev->dev);
+	kfree(masks);
+	return ret;
+}
+
+/**
+ * msix_capability_init - configure device's MSI-X capability
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of struct msix_entry entries
+ * @nvec: number of @entries
+ * @affd: Optional pointer to enable automatic affinity assignment
+ *
+ * Setup the MSI-X capability structure of the device function with the
+ * requested number of MSI-X IRQs. A return of zero indicates the
+ * successful setup of requested MSI-X entries with allocated IRQs or
+ * non-zero for otherwise.
+ **/
+static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
+				int nvec, struct irq_affinity *affd)
+{
+	int ret, tsize;
+	u16 control;
+
+	/*
+	 * Some devices require MSI-X to be enabled before the MSI-X
+	 * registers can be accessed. Mask all the vectors to prevent
+	 * interrupts coming in before they're fully set up.
+	 */
+	pci_msix_clear_and_set_ctrl(dev, 0, PCI_MSIX_FLAGS_MASKALL |
+				    PCI_MSIX_FLAGS_ENABLE);
+
+	/* Mark it enabled so setup functions can query it */
+	dev->msix_enabled = 1;
+
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
+	/* Request & Map MSI-X table region */
+	tsize = msix_table_size(control);
+	dev->msix_base = msix_map_region(dev, tsize);
+	if (!dev->msix_base) {
+		ret = -ENOMEM;
+		goto out_disable;
+	}
+
+	ret = msix_setup_interrupts(dev, entries, nvec, affd);
+	if (ret)
+		goto out_disable;
+
+	/* Disable INTX */
+	pci_intx_for_msi(dev, 0);
+
+	/*
+	 * Ensure that all table entries are masked to prevent
+	 * stale entries from firing in a crash kernel.
+	 *
+	 * Done late to deal with a broken Marvell NVME device
+	 * which takes the MSI-X mask bits into account even
+	 * when MSI-X is disabled, which prevents MSI delivery.
+	 */
+	msix_mask_all(dev->msix_base, tsize);
+	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
+
+	pcibios_free_irq(dev);
+	return 0;
+
+out_disable:
+	dev->msix_enabled = 0;
+	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE, 0);
+
+	return ret;
+}
+
+static bool pci_msix_validate_entries(struct pci_dev *dev, struct msix_entry *entries, int nvec)
+{
+	bool nogap;
+	int i, j;
+
+	if (!entries)
+		return true;
+
+	nogap = pci_msi_domain_supports(dev, MSI_FLAG_MSIX_CONTIGUOUS, DENY_LEGACY);
+
+	for (i = 0; i < nvec; i++) {
+		/* Check for duplicate entries */
+		for (j = i + 1; j < nvec; j++) {
+			if (entries[i].entry == entries[j].entry)
+				return false;
+		}
+		/* Check for unsupported gaps */
+		if (nogap && entries[i].entry != i)
+			return false;
+	}
+	return true;
+}
+
+int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec,
+			    int maxvec, struct irq_affinity *affd, int flags)
+{
+	int hwsize, rc, nvec = maxvec;
+
+	if (maxvec < minvec)
+		return -ERANGE;
+
+	if (dev->msi_enabled) {
+		pci_info(dev, "can't enable MSI-X (MSI already enabled)\n");
+		return -EINVAL;
+	}
+
+	if (WARN_ON_ONCE(dev->msix_enabled))
+		return -EINVAL;
+
+	/* Check MSI-X early on irq domain enabled architectures */
+	if (!pci_msi_domain_supports(dev, MSI_FLAG_PCI_MSIX, ALLOW_LEGACY))
+		return -ENOTSUPP;
+
+	if (!pci_msi_supported(dev, nvec) || dev->current_state != PCI_D0)
+		return -EINVAL;
+
+	hwsize = pci_msix_vec_count(dev);
+	if (hwsize < 0)
+		return hwsize;
+
+	if (!pci_msix_validate_entries(dev, entries, nvec))
+		return -EINVAL;
+
+	if (hwsize < nvec) {
+		/* Keep the IRQ virtual hackery working */
+		if (flags & PCI_IRQ_VIRTUAL)
+			hwsize = nvec;
+		else
+			nvec = hwsize;
+	}
+
+	if (nvec < minvec)
+		return -ENOSPC;
+
+	rc = pci_setup_msi_context(dev);
+	if (rc)
+		return rc;
+
+	if (!pci_setup_msix_device_domain(dev, hwsize))
+		return -ENODEV;
+
+	for (;;) {
+		if (affd) {
+			nvec = irq_calc_affinity_vectors(minvec, nvec, affd);
+			if (nvec < minvec)
+				return -ENOSPC;
+		}
+
+		rc = msix_capability_init(dev, entries, nvec, affd);
+		if (rc == 0)
+			return nvec;
+
+		if (rc < 0)
+			return rc;
+		if (rc < minvec)
+			return -ENOSPC;
+
+		nvec = rc;
+	}
+}
+
+void __pci_restore_msix_state(struct pci_dev *dev)
+{
+	struct msi_desc *entry;
+	bool write_msg;
+
+	if (!dev->msix_enabled)
+		return;
+
+	/* route the table */
+	pci_intx_for_msi(dev, 0);
+	pci_msix_clear_and_set_ctrl(dev, 0,
+				    PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL);
+
+	write_msg = arch_restore_msi_irqs(dev);
+
+	msi_lock_descs(&dev->dev);
+	msi_for_each_desc(entry, &dev->dev, MSI_DESC_ALL) {
+		if (write_msg)
+			__pci_write_msi_msg(entry, &entry->msg);
+		pci_msix_write_vector_ctrl(entry, entry->pci.msix_ctrl);
+	}
+	msi_unlock_descs(&dev->dev);
+
+	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0);
+}
+
+void pci_msix_shutdown(struct pci_dev *dev)
+{
+	struct msi_desc *desc;
+
+	if (!pci_msi_enable || !dev || !dev->msix_enabled)
+		return;
+
+	if (pci_dev_is_disconnected(dev)) {
+		dev->msix_enabled = 0;
+		return;
+	}
+
+	/* Return the device with MSI-X masked as initial states */
+	msi_for_each_desc(desc, &dev->dev, MSI_DESC_ALL)
+		pci_msix_mask(desc);
+
+	pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
+	pci_intx_for_msi(dev, 1);
+	dev->msix_enabled = 0;
+	pcibios_alloc_irq(dev);
+}
+
+/* Common interfaces */
+
+void pci_free_msi_irqs(struct pci_dev *dev)
+{
+	pci_msi_teardown_msi_irqs(dev);
+
+	if (dev->msix_base) {
+		iounmap(dev->msix_base);
+		dev->msix_base = NULL;
+	}
+}
+
+/* Misc. infrastructure */
+
+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
+{
+	return to_pci_dev(desc->dev);
+}
+EXPORT_SYMBOL(msi_desc_to_pci_dev);
+
+void pci_no_msi(void)
+{
+	pci_msi_enable = 0;
+}
diff --git a/drivers/pci/msi/msi.h b/drivers/pci/msi/msi.h
new file mode 100644
index 0000000000..ee53cf079f
--- /dev/null
+++ b/drivers/pci/msi/msi.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/pci.h>
+#include <linux/msi.h>
+
+#define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)
+
+int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+void pci_msi_teardown_msi_irqs(struct pci_dev *dev);
+
+/* Mask/unmask helpers */
+void pci_msi_update_mask(struct msi_desc *desc, u32 clear, u32 set);
+
+static inline void pci_msi_mask(struct msi_desc *desc, u32 mask)
+{
+	pci_msi_update_mask(desc, 0, mask);
+}
+
+static inline void pci_msi_unmask(struct msi_desc *desc, u32 mask)
+{
+	pci_msi_update_mask(desc, mask, 0);
+}
+
+static inline void __iomem *pci_msix_desc_addr(struct msi_desc *desc)
+{
+	return desc->pci.mask_base + desc->msi_index * PCI_MSIX_ENTRY_SIZE;
+}
+
+/*
+ * This internal function does not flush PCI writes to the device. All
+ * users must ensure that they read from the device before either assuming
+ * that the device state is up to date, or returning out of this file.
+ * It does not affect the msi_desc::msix_ctrl cache either. Use with care!
+ */
+static inline void pci_msix_write_vector_ctrl(struct msi_desc *desc, u32 ctrl)
+{
+	void __iomem *desc_addr = pci_msix_desc_addr(desc);
+
+	if (desc->pci.msi_attrib.can_mask)
+		writel(ctrl, desc_addr + PCI_MSIX_ENTRY_VECTOR_CTRL);
+}
+
+static inline void pci_msix_mask(struct msi_desc *desc)
+{
+	desc->pci.msix_ctrl |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
+	/* Flush write to device */
+	readl(desc->pci.mask_base);
+}
+
+static inline void pci_msix_unmask(struct msi_desc *desc)
+{
+	desc->pci.msix_ctrl &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
+	pci_msix_write_vector_ctrl(desc, desc->pci.msix_ctrl);
+}
+
+static inline void __pci_msi_mask_desc(struct msi_desc *desc, u32 mask)
+{
+	if (desc->pci.msi_attrib.is_msix)
+		pci_msix_mask(desc);
+	else
+		pci_msi_mask(desc, mask);
+}
+
+static inline void __pci_msi_unmask_desc(struct msi_desc *desc, u32 mask)
+{
+	if (desc->pci.msi_attrib.is_msix)
+		pci_msix_unmask(desc);
+	else
+		pci_msi_unmask(desc, mask);
+}
+
+/*
+ * PCI 2.3 does not specify mask bits for each MSI interrupt.
+ * Attempting to mask all MSI interrupts by clearing the MSI enable bit
+ * does not work reliably as devices without an INTx disable bit will then
+ * generate a level IRQ which will never be cleared.
+ */
+static inline __attribute_const__ u32 msi_multi_mask(struct msi_desc *desc)
+{
+	/* Don't shift by >= width of type */
+	if (desc->pci.msi_attrib.multi_cap >= 5)
+		return 0xffffffff;
+	return (1 << (1 << desc->pci.msi_attrib.multi_cap)) - 1;
+}
+
+void msix_prepare_msi_desc(struct pci_dev *dev, struct msi_desc *desc);
+
+/* Subsystem variables */
+extern int pci_msi_enable;
+
+/* MSI internal functions invoked from the public APIs */
+void pci_msi_shutdown(struct pci_dev *dev);
+void pci_msix_shutdown(struct pci_dev *dev);
+void pci_free_msi_irqs(struct pci_dev *dev);
+int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, struct irq_affinity *affd);
+int __pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, int minvec,
+			    int maxvec, struct irq_affinity *affd, int flags);
+void __pci_restore_msi_state(struct pci_dev *dev);
+void __pci_restore_msix_state(struct pci_dev *dev);
+
+/* irq_domain related functionality */
+
+enum support_mode {
+	ALLOW_LEGACY,
+	DENY_LEGACY,
+};
+
+bool pci_msi_domain_supports(struct pci_dev *dev, unsigned int feature_mask, enum support_mode mode);
+bool pci_setup_msi_device_domain(struct pci_dev *pdev);
+bool pci_setup_msix_device_domain(struct pci_dev *pdev, unsigned int hwsize);
+
+/* Legacy (!IRQDOMAIN) fallbacks */
+
+#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
+int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
+void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev);
+#else
+static inline int pci_msi_legacy_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+{
+	WARN_ON_ONCE(1);
+	return -ENODEV;
+}
+
+static inline void pci_msi_legacy_teardown_msi_irqs(struct pci_dev *dev)
+{
+	WARN_ON_ONCE(1);
+}
+#endif
diff --git a/drivers/pci/msi/pcidev_msi.c b/drivers/pci/msi/pcidev_msi.c
new file mode 100644
index 0000000000..5520aff53b
--- /dev/null
+++ b/drivers/pci/msi/pcidev_msi.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MSI[X] related functions which are available unconditionally.
+ */
+#include "../pci.h"
+
+/*
+ * Disable the MSI[X] hardware to avoid screaming interrupts during boot.
+ * This is the power on reset default so usually this should be a noop.
+ */
+
+void pci_msi_init(struct pci_dev *dev)
+{
+	u16 ctrl;
+
+	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
+	if (!dev->msi_cap)
+		return;
+
+	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl);
+	if (ctrl & PCI_MSI_FLAGS_ENABLE) {
+		pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS,
+				      ctrl & ~PCI_MSI_FLAGS_ENABLE);
+	}
+
+	if (!(ctrl & PCI_MSI_FLAGS_64BIT))
+		dev->no_64bit_msi = 1;
+}
+
+void pci_msix_init(struct pci_dev *dev)
+{
+	u16 ctrl;
+
+	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+	if (!dev->msix_cap)
+		return;
+
+	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
+	if (ctrl & PCI_MSIX_FLAGS_ENABLE) {
+		pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS,
+				      ctrl & ~PCI_MSIX_FLAGS_ENABLE);
+	}
+}