From e54def4ad8144ab15f826416e2e0f290ef1901b4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 19 Jun 2024 23:00:30 +0200
Subject: Adding upstream version 6.9.2.

Signed-off-by: Daniel Baumann
---
 kernel/irq/irq_sim.c   |  28 +++-----
 kernel/irq/irqdesc.c   | 112 +++++++++++++++++-------------
 kernel/irq/irqdomain.c |  28 ++++++--
 kernel/irq/manage.c    | 109 ++++++++++++++---------------
 kernel/irq/matrix.c    |  28 +++++---
 kernel/irq/msi.c       | 184 +++++++++++++++++++++++++++++++++++++++----------
 6 files changed, 314 insertions(+), 175 deletions(-)

diff --git a/kernel/irq/irq_sim.c b/kernel/irq/irq_sim.c
index dd76323ea3..38d6ae651a 100644
--- a/kernel/irq/irq_sim.c
+++ b/kernel/irq/irq_sim.c
@@ -4,10 +4,11 @@
  * Copyright (C) 2020 Bartosz Golaszewski
  */
 
+#include <linux/cleanup.h>
+#include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irq_sim.h>
 #include <linux/irq_work.h>
-#include <linux/interrupt.h>
 #include <linux/slab.h>
 
 struct irq_sim_work_ctx {
@@ -19,7 +20,6 @@ struct irq_sim_work_ctx {
 };
 
 struct irq_sim_irq_ctx {
-	int			irqnum;
 	bool			enabled;
 	struct irq_sim_work_ctx	*work_ctx;
 };
@@ -164,33 +164,27 @@ static const struct irq_domain_ops irq_sim_domain_ops = {
 struct irq_domain *irq_domain_create_sim(struct fwnode_handle *fwnode,
 					 unsigned int num_irqs)
 {
-	struct irq_sim_work_ctx *work_ctx;
+	struct irq_sim_work_ctx *work_ctx __free(kfree) =
+				kmalloc(sizeof(*work_ctx), GFP_KERNEL);
 
-	work_ctx = kmalloc(sizeof(*work_ctx), GFP_KERNEL);
 	if (!work_ctx)
-		goto err_out;
+		return ERR_PTR(-ENOMEM);
 
-	work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL);
-	if (!work_ctx->pending)
-		goto err_free_work_ctx;
+	unsigned long *pending __free(bitmap) = bitmap_zalloc(num_irqs, GFP_KERNEL);
+	if (!pending)
+		return ERR_PTR(-ENOMEM);
 
 	work_ctx->domain = irq_domain_create_linear(fwnode, num_irqs,
 						    &irq_sim_domain_ops,
 						    work_ctx);
 	if (!work_ctx->domain)
-		goto err_free_bitmap;
+		return ERR_PTR(-ENOMEM);
 
 	work_ctx->irq_count = num_irqs;
 	work_ctx->work = IRQ_WORK_INIT_HARD(irq_sim_handle_irq);
+	work_ctx->pending = no_free_ptr(pending);
 
-	return work_ctx->domain;
-
-err_free_bitmap:
-	bitmap_free(work_ctx->pending);
-err_free_work_ctx:
-	kfree(work_ctx);
-err_out:
-	return ERR_PTR(-ENOMEM);
+	return no_free_ptr(work_ctx)->domain;
 }
 EXPORT_SYMBOL_GPL(irq_domain_create_sim);
 
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 371eb1711d..4c6b32318c 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -92,11 +92,23 @@ static void desc_smp_init(struct irq_desc *desc, int node,
 #endif
 }
 
+static void free_masks(struct irq_desc *desc)
+{
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	free_cpumask_var(desc->pending_mask);
+#endif
+	free_cpumask_var(desc->irq_common_data.affinity);
+#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
+	free_cpumask_var(desc->irq_common_data.effective_affinity);
+#endif
+}
+
 #else
 static inline int
 alloc_masks(struct irq_desc *desc, int node) { return 0; }
 static inline void
 desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
+static inline void free_masks(struct irq_desc *desc) { }
 #endif
 
 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
@@ -165,6 +177,39 @@ static void delete_irq_desc(unsigned int irq)
 	mas_erase(&mas);
 }
 
+#ifdef CONFIG_SPARSE_IRQ
+static const struct kobj_type irq_kobj_type;
+#endif
+
+static int init_desc(struct irq_desc *desc, int irq, int node,
+		     unsigned int flags,
+		     const struct cpumask *affinity,
+		     struct module *owner)
+{
+	desc->kstat_irqs = alloc_percpu(unsigned int);
+	if (!desc->kstat_irqs)
+		return -ENOMEM;
+
+	if (alloc_masks(desc, node)) {
+		free_percpu(desc->kstat_irqs);
+		return -ENOMEM;
+	}
+
+	raw_spin_lock_init(&desc->lock);
+	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
+	mutex_init(&desc->request_mutex);
+	init_waitqueue_head(&desc->wait_for_threads);
+	desc_set_defaults(irq, desc, node, affinity, owner);
+	irqd_set(&desc->irq_data, flags);
+	irq_resend_init(desc);
+#ifdef CONFIG_SPARSE_IRQ
+	kobject_init(&desc->kobj, &irq_kobj_type);
+	init_rcu_head(&desc->rcu);
+#endif
+
+	return 0;
+}
+
 #ifdef CONFIG_SPARSE_IRQ
 
 static void irq_kobj_release(struct kobject *kobj);
@@ -384,21 +429,6 @@ struct irq_desc *irq_to_desc(unsigned int irq)
 EXPORT_SYMBOL_GPL(irq_to_desc);
 #endif
 
-#ifdef CONFIG_SMP
-static void free_masks(struct irq_desc *desc)
-{
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	free_cpumask_var(desc->pending_mask);
-#endif
-	free_cpumask_var(desc->irq_common_data.affinity);
-#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
-	free_cpumask_var(desc->irq_common_data.effective_affinity);
-#endif
-}
-#else
-static inline void free_masks(struct irq_desc *desc) { }
-#endif
-
 void irq_lock_sparse(void)
 {
 	mutex_lock(&sparse_irq_lock);
@@ -414,36 +444,19 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
 				   struct module *owner)
 {
 	struct irq_desc *desc;
+	int ret;
 
 	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
 	if (!desc)
 		return NULL;
 
-	/* allocate based on nr_cpu_ids */
-	desc->kstat_irqs = alloc_percpu(unsigned int);
-	if (!desc->kstat_irqs)
-		goto err_desc;
-
-	if (alloc_masks(desc, node))
-		goto err_kstat;
-	raw_spin_lock_init(&desc->lock);
-	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
-	mutex_init(&desc->request_mutex);
-	init_rcu_head(&desc->rcu);
-	init_waitqueue_head(&desc->wait_for_threads);
-
-	desc_set_defaults(irq, desc, node, affinity, owner);
-	irqd_set(&desc->irq_data, flags);
-	kobject_init(&desc->kobj, &irq_kobj_type);
-	irq_resend_init(desc);
+	ret = init_desc(desc, irq, node, flags, affinity, owner);
+	if (unlikely(ret)) {
+		kfree(desc);
+		return NULL;
+	}
 
 	return desc;
-
-err_kstat:
-	free_percpu(desc->kstat_irqs);
-err_desc:
-	kfree(desc);
-	return NULL;
 }
 
 static void irq_kobj_release(struct kobject *kobj)
@@ -583,26 +596,29 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 int __init early_irq_init(void)
 {
 	int count, i, node = first_online_node;
-	struct irq_desc *desc;
+	int ret;
 
 	init_irq_default_affinity();
 
 	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);
 
-	desc = irq_desc;
 	count = ARRAY_SIZE(irq_desc);
 
 	for (i = 0; i < count; i++) {
-		desc[i].kstat_irqs = alloc_percpu(unsigned int);
-		alloc_masks(&desc[i], node);
-		raw_spin_lock_init(&desc[i].lock);
-		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
-		mutex_init(&desc[i].request_mutex);
-		init_waitqueue_head(&desc[i].wait_for_threads);
-		desc_set_defaults(i, &desc[i], node, NULL, NULL);
-		irq_resend_init(&desc[i]);
+		ret = init_desc(irq_desc + i, i, node, 0, NULL, NULL);
+		if (unlikely(ret))
+			goto __free_desc_res;
 	}
+
 	return arch_early_irq_init();
+
+__free_desc_res:
+	while (--i >= 0) {
+		free_masks(irq_desc + i);
+		free_percpu(irq_desc[i].kstat_irqs);
+	}
+
+	return ret;
 }
 
 struct irq_desc *irq_to_desc(unsigned int irq)
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 0bdef4fe92..3dd1c871e0 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -29,6 +29,7 @@ static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
 					unsigned int nr_irqs, int node, void *arg,
 					bool realloc, const struct irq_affinity_desc *affinity);
 static void irq_domain_check_hierarchy(struct irq_domain *domain);
+static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq);
 
 struct irqchip_fwid {
 	struct fwnode_handle	fwnode;
@@ -448,7 +449,7 @@ struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 	 */
 	mutex_lock(&irq_domain_mutex);
 	list_for_each_entry(h, &irq_domain_list, link) {
-		if (h->ops->select && fwspec->param_count)
+		if (h->ops->select && bus_token != DOMAIN_BUS_ANY)
 			rc = h->ops->select(h, fwspec, bus_token);
 		else if (h->ops->match)
 			rc = h->ops->match(h, to_of_node(fwnode), bus_token);
@@ -858,8 +859,13 @@ unsigned int irq_create_fwspec_mapping(struct irq_fwspec *fwspec)
 	}
 
 	if (irq_domain_is_hierarchy(domain)) {
-		virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
-						    fwspec, false, NULL);
+		if (irq_domain_is_msi_device(domain)) {
+			mutex_unlock(&domain->root->mutex);
+			virq = msi_device_domain_alloc_wired(domain, hwirq, type);
+			mutex_lock(&domain->root->mutex);
+		} else
+			virq = irq_domain_alloc_irqs_locked(domain, -1, 1, NUMA_NO_NODE,
+							    fwspec, false, NULL);
 		if (virq <= 0) {
 			virq = 0;
 			goto out;
@@ -914,7 +920,7 @@ void irq_dispose_mapping(unsigned int virq)
 		return;
 
 	if (irq_domain_is_hierarchy(domain)) {
-		irq_domain_free_irqs(virq, 1);
+		irq_domain_free_one_irq(domain, virq);
 	} else {
 		irq_domain_disassociate(domain, virq);
 		irq_free_desc(virq);
@@ -1755,6 +1761,14 @@ void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs)
 	irq_free_descs(virq, nr_irqs);
 }
 
+static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq)
+{
+	if (irq_domain_is_msi_device(domain))
+		msi_device_domain_free_wired(domain, virq);
+	else
+		irq_domain_free_irqs(virq, 1);
+}
+
 /**
  * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain
  * @domain:	Domain below which interrupts must be allocated
@@ -1907,9 +1921,9 @@ static int irq_domain_alloc_irqs_locked(struct irq_domain *domain, int irq_base,
 	return -EINVAL;
 }
 
-static void irq_domain_check_hierarchy(struct irq_domain *domain)
-{
-}
+static void irq_domain_check_hierarchy(struct irq_domain *domain) { }
+static void irq_domain_free_one_irq(struct irq_domain *domain, unsigned int virq) { }
+
 #endif	/* CONFIG_IRQ_DOMAIN_HIERARCHY */
 
 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7389add527..bf9ae8a868 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -192,10 +192,14 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 	struct irqaction *action;
 
 	for_each_action_of_desc(desc, action) {
-		if (action->thread)
+		if (action->thread) {
 			set_bit(IRQTF_AFFINITY, &action->thread_flags);
-		if (action->secondary && action->secondary->thread)
+			wake_up_process(action->thread);
+		}
+		if (action->secondary && action->secondary->thread) {
 			set_bit(IRQTF_AFFINITY, &action->secondary->thread_flags);
+			wake_up_process(action->secondary->thread);
+		}
 	}
 }
 
@@ -1049,10 +1053,57 @@ static irqreturn_t irq_forced_secondary_handler(int irq, void *dev_id)
 	return IRQ_NONE;
 }
 
-static int irq_wait_for_interrupt(struct irqaction *action)
+#ifdef CONFIG_SMP
+/*
+ * Check whether we need to change the affinity of the interrupt thread.
+ */
+static void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
+{
+	cpumask_var_t mask;
+	bool valid = false;
+
+	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
+		return;
+
+	__set_current_state(TASK_RUNNING);
+
+	/*
+	 * In case we are out of memory we set IRQTF_AFFINITY again and
+	 * try again next time
+	 */
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		set_bit(IRQTF_AFFINITY, &action->thread_flags);
+		return;
+	}
+
+	raw_spin_lock_irq(&desc->lock);
+	/*
+	 * This code is triggered unconditionally. Check the affinity
+	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+	 */
+	if (cpumask_available(desc->irq_common_data.affinity)) {
+		const struct cpumask *m;
+
+		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
+		cpumask_copy(mask, m);
+		valid = true;
+	}
+	raw_spin_unlock_irq(&desc->lock);
+
+	if (valid)
+		set_cpus_allowed_ptr(current, mask);
+	free_cpumask_var(mask);
+}
+#else
+static inline void irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
+#endif
+
+static int irq_wait_for_interrupt(struct irq_desc *desc,
+				  struct irqaction *action)
 {
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
+		irq_thread_check_affinity(desc, action);
 
 		if (kthread_should_stop()) {
 			/* may need to run one last time */
@@ -1129,52 +1180,6 @@ out_unlock:
 	chip_bus_sync_unlock(desc);
 }
 
-#ifdef CONFIG_SMP
-/*
- * Check whether we need to change the affinity of the interrupt thread.
- */
-static void
-irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
-{
-	cpumask_var_t mask;
-	bool valid = true;
-
-	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
-		return;
-
-	/*
-	 * In case we are out of memory we set IRQTF_AFFINITY again and
-	 * try again next time
-	 */
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-		set_bit(IRQTF_AFFINITY, &action->thread_flags);
-		return;
-	}
-
-	raw_spin_lock_irq(&desc->lock);
-	/*
-	 * This code is triggered unconditionally. Check the affinity
-	 * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
-	 */
-	if (cpumask_available(desc->irq_common_data.affinity)) {
-		const struct cpumask *m;
-
-		m = irq_data_get_effective_affinity_mask(&desc->irq_data);
-		cpumask_copy(mask, m);
-	} else {
-		valid = false;
-	}
-	raw_spin_unlock_irq(&desc->lock);
-
-	if (valid)
-		set_cpus_allowed_ptr(current, mask);
-	free_cpumask_var(mask);
-}
-#else
-static inline void
-irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
-#endif
-
 /*
  * Interrupts which are not explicitly requested as threaded
  * interrupts rely on the implicit bh/preempt disable of the hard irq
@@ -1312,13 +1317,9 @@ static int irq_thread(void *data)
 	init_task_work(&on_exit_work, irq_thread_dtor);
 	task_work_add(current, &on_exit_work, TWA_NONE);
 
-	irq_thread_check_affinity(desc, action);
-
-	while (!irq_wait_for_interrupt(action)) {
+	while (!irq_wait_for_interrupt(desc, action)) {
 		irqreturn_t action_ret;
 
-		irq_thread_check_affinity(desc, action);
-
 		action_ret = handler_fn(desc, action);
 		if (action_ret == IRQ_WAKE_THREAD)
 			irq_wake_secondary(desc, action);
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 75d0ae490e..8f222d1ccc 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -8,8 +8,6 @@
 #include <linux/cpu.h>
 #include <linux/irq.h>
 
-#define IRQ_MATRIX_SIZE	(BITS_TO_LONGS(IRQ_MATRIX_BITS))
-
 struct cpumap {
 	unsigned int		available;
 	unsigned int		allocated;
@@ -17,8 +15,8 @@ struct cpumap {
 	unsigned int		managed_allocated;
 	bool			initialized;
 	bool			online;
-	unsigned long		alloc_map[IRQ_MATRIX_SIZE];
-	unsigned long		managed_map[IRQ_MATRIX_SIZE];
+	unsigned long		*managed_map;
+	unsigned long		alloc_map[];
 };
 
 struct irq_matrix {
@@ -32,8 +30,8 @@ struct irq_matrix {
 	unsigned int		total_allocated;
 	unsigned int		online_maps;
 	struct cpumap __percpu	*maps;
-	unsigned long		scratch_map[IRQ_MATRIX_SIZE];
-	unsigned long		system_map[IRQ_MATRIX_SIZE];
+	unsigned long		*system_map;
+	unsigned long		scratch_map[];
 };
 
 #define CREATE_TRACE_POINTS
@@ -50,24 +48,32 @@ __init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
 					   unsigned int alloc_start,
 					   unsigned int alloc_end)
 {
+	unsigned int cpu, matrix_size = BITS_TO_LONGS(matrix_bits);
 	struct irq_matrix *m;
 
-	if (matrix_bits > IRQ_MATRIX_BITS)
-		return NULL;
-
-	m = kzalloc(sizeof(*m), GFP_KERNEL);
+	m = kzalloc(struct_size(m, scratch_map, matrix_size * 2), GFP_KERNEL);
 	if (!m)
 		return NULL;
 
+	m->system_map = &m->scratch_map[matrix_size];
+
 	m->matrix_bits = matrix_bits;
 	m->alloc_start = alloc_start;
 	m->alloc_end = alloc_end;
 	m->alloc_size = alloc_end - alloc_start;
-	m->maps = alloc_percpu(*m->maps);
+	m->maps = __alloc_percpu(struct_size(m->maps, alloc_map, matrix_size * 2),
+				 __alignof__(*m->maps));
 	if (!m->maps) {
 		kfree(m);
 		return NULL;
 	}
+
+	for_each_possible_cpu(cpu) {
+		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
+
+		cm->managed_map = &cm->alloc_map[matrix_size];
+	}
+
 	return m;
 }
 
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 79b4a58ba9..f90952ebc4 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -726,11 +726,26 @@ static void msi_domain_free(struct irq_domain *domain, unsigned int virq,
 	irq_domain_free_irqs_top(domain, virq, nr_irqs);
 }
 
+static int msi_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec,
+				irq_hw_number_t *hwirq, unsigned int *type)
+{
+	struct msi_domain_info *info = domain->host_data;
+
+	/*
+	 * This will catch allocations through the regular irqdomain path except
+	 * for MSI domains which really support this, e.g. MBIGEN.
+	 */
+	if (!info->ops->msi_translate)
+		return -ENOTSUPP;
+	return info->ops->msi_translate(domain, fwspec, hwirq, type);
+}
+
 static const struct irq_domain_ops msi_domain_ops = {
 	.alloc		= msi_domain_alloc,
 	.free		= msi_domain_free,
 	.activate	= msi_domain_activate,
 	.deactivate	= msi_domain_deactivate,
+	.translate	= msi_domain_translate,
 };
 
 static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info,
@@ -830,8 +845,11 @@ static struct irq_domain *__msi_create_irq_domain(struct fwnode_handle *fwnode,
 	domain = irq_domain_create_hierarchy(parent, flags | IRQ_DOMAIN_FLAG_MSI, 0,
 					     fwnode, &msi_domain_ops, info);
 
-	if (domain)
+	if (domain) {
 		irq_domain_update_bus_token(domain, info->bus_token);
+		if (info->flags & MSI_FLAG_PARENT_PM_DEV)
+			domain->pm_dev = parent->pm_dev;
+	}
 
 	return domain;
 }
@@ -945,9 +963,9 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
 			       void *chip_data)
 {
 	struct irq_domain *domain, *parent = dev->msi.domain;
-	const struct msi_parent_ops *pops;
+	struct fwnode_handle *fwnode, *fwnalloced = NULL;
 	struct msi_domain_template *bundle;
-	struct fwnode_handle *fwnode;
+	const struct msi_parent_ops *pops;
 
 	if (!irq_domain_is_msi_parent(parent))
 		return false;
@@ -970,7 +988,19 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
 		 pops->prefix ? : "", bundle->chip.name, dev_name(dev));
 	bundle->chip.name = bundle->name;
 
-	fwnode = irq_domain_alloc_named_fwnode(bundle->name);
+	/*
+	 * Using the device firmware node is required for wire to MSI
+	 * device domains so that the existing firmware results in a domain
+	 * match.
+	 * All other device domains like PCI/MSI use the named firmware
+	 * node as they are not guaranteed to have a fwnode. They are never
+	 * looked up and always handled in the context of the device.
+	 */
+	if (bundle->info.flags & MSI_FLAG_USE_DEV_FWNODE)
+		fwnode = dev->fwnode;
+	else
+		fwnode = fwnalloced = irq_domain_alloc_named_fwnode(bundle->name);
+
 	if (!fwnode)
 		goto free_bundle;
 
@@ -997,7 +1027,7 @@ bool msi_create_device_irq_domain(struct device *dev, unsigned int domid,
 fail:
 	msi_unlock_descs(dev);
 free_fwnode:
-	irq_domain_free_fwnode(fwnode);
+	irq_domain_free_fwnode(fwnalloced);
 free_bundle:
 	kfree(bundle);
 	return false;
@@ -1431,34 +1461,10 @@ int msi_domain_alloc_irqs_all_locked(struct device *dev, unsigned int domid, int
 	return msi_domain_alloc_locked(dev, &ctrl);
 }
 
-/**
- * msi_domain_alloc_irq_at - Allocate an interrupt from a MSI interrupt domain at
- *			     a given index - or at the next free index
- *
- * @dev:	Pointer to device struct of the device for which the interrupts
- *		are allocated
- * @domid:	Id of the interrupt domain to operate on
- * @index:	Index for allocation. If @index == %MSI_ANY_INDEX the allocation
- *		uses the next free index.
- * @affdesc:	Optional pointer to an interrupt affinity descriptor structure
- * @icookie:	Optional pointer to a domain specific per instance cookie. If
- *		non-NULL the content of the cookie is stored in msi_desc::data.
- *		Must be NULL for MSI-X allocations
- *
- * This requires a MSI interrupt domain which lets the core code manage the
- * MSI descriptors.
- *
- * Return: struct msi_map
- *
- *	On success msi_map::index contains the allocated index number and
- *	msi_map::virq the corresponding Linux interrupt number
- *
- *	On failure msi_map::index contains the error code and msi_map::virq
- *	is %0.
- */
-struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
-				       const struct irq_affinity_desc *affdesc,
-				       union msi_instance_cookie *icookie)
+static struct msi_map __msi_domain_alloc_irq_at(struct device *dev, unsigned int domid,
+						unsigned int index,
+						const struct irq_affinity_desc *affdesc,
+						union msi_instance_cookie *icookie)
 {
 	struct msi_ctrl ctrl = { .domid = domid, .nirqs = 1, };
 	struct irq_domain *domain;
@@ -1466,17 +1472,16 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u
 	struct msi_desc *desc;
 	int ret;
 
-	msi_lock_descs(dev);
 	domain = msi_get_device_domain(dev, domid);
 	if (!domain) {
 		map.index = -ENODEV;
-		goto unlock;
+		return map;
 	}
 
 	desc = msi_alloc_desc(dev, 1, affdesc);
 	if (!desc) {
 		map.index = -ENOMEM;
-		goto unlock;
+		return map;
 	}
 
 	if (icookie)
@@ -1485,7 +1490,7 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u
 	ret = msi_insert_desc(dev, desc, domid, index);
 	if (ret) {
 		map.index = ret;
-		goto unlock;
+		return map;
 	}
 
 	ctrl.first = ctrl.last = desc->msi_index;
@@ -1498,11 +1503,90 @@ struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, u
 		map.index = desc->msi_index;
 		map.virq = desc->irq;
 	}
-unlock:
+	return map;
+}
+
+/**
+ * msi_domain_alloc_irq_at - Allocate an interrupt from a MSI interrupt domain at
+ *			     a given index - or at the next free index
+ *
+ * @dev:	Pointer to device struct of the device for which the interrupts
+ *		are allocated
+ * @domid:	Id of the interrupt domain to operate on
+ * @index:	Index for allocation. If @index == %MSI_ANY_INDEX the allocation
+ *		uses the next free index.
+ * @affdesc:	Optional pointer to an interrupt affinity descriptor structure
+ * @icookie:	Optional pointer to a domain specific per instance cookie. If
+ *		non-NULL the content of the cookie is stored in msi_desc::data.
+ *		Must be NULL for MSI-X allocations
+ *
+ * This requires a MSI interrupt domain which lets the core code manage the
+ * MSI descriptors.
+ *
+ * Return: struct msi_map
+ *
+ *	On success msi_map::index contains the allocated index number and
+ *	msi_map::virq the corresponding Linux interrupt number
+ *
+ *	On failure msi_map::index contains the error code and msi_map::virq
+ *	is %0.
+ */
+struct msi_map msi_domain_alloc_irq_at(struct device *dev, unsigned int domid, unsigned int index,
+				       const struct irq_affinity_desc *affdesc,
+				       union msi_instance_cookie *icookie)
+{
+	struct msi_map map;
+
+	msi_lock_descs(dev);
+	map = __msi_domain_alloc_irq_at(dev, domid, index, affdesc, icookie);
 	msi_unlock_descs(dev);
 	return map;
 }
 
+/**
+ * msi_device_domain_alloc_wired - Allocate a "wired" interrupt on @domain
+ * @domain:	The domain to allocate on
+ * @hwirq:	The hardware interrupt number to allocate for
+ * @type:	The interrupt type
+ *
+ * This weirdness supports wire to MSI controllers like MBIGEN.
+ *
+ * @hwirq is the hardware interrupt number which is handed in from
+ * irq_create_fwspec_mapping(). As the wire to MSI domain is sparse, but
+ * sized in firmware, the hardware interrupt number cannot be used as MSI
+ * index. For the underlying irq chip the MSI index is irrelevant and
+ * all it needs is the hardware interrupt number.
+ *
+ * To handle this the MSI index is allocated with MSI_ANY_INDEX and the
+ * hardware interrupt number is stored along with the type information in
+ * msi_desc::cookie so the underlying interrupt chip and domain code can
+ * retrieve it.
+ *
+ * Return: The Linux interrupt number (> 0) or an error code
+ */
+int msi_device_domain_alloc_wired(struct irq_domain *domain, unsigned int hwirq,
+				  unsigned int type)
+{
+	unsigned int domid = MSI_DEFAULT_DOMAIN;
+	union msi_instance_cookie icookie = { };
+	struct device *dev = domain->dev;
+	struct msi_map map = { };
+
+	if (WARN_ON_ONCE(!dev || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI))
+		return -EINVAL;
+
+	icookie.value = ((u64)type << 32) | hwirq;
+
+	msi_lock_descs(dev);
+	if (WARN_ON_ONCE(msi_get_device_domain(dev, domid) != domain))
+		map.index = -EINVAL;
+	else
+		map = __msi_domain_alloc_irq_at(dev, domid, MSI_ANY_INDEX, NULL, &icookie);
+	msi_unlock_descs(dev);
+
+	return map.index >= 0 ? map.virq : map.index;
+}
+
 static void __msi_domain_free_irqs(struct device *dev, struct irq_domain *domain,
 				   struct msi_ctrl *ctrl)
 {
@@ -1628,6 +1712,30 @@ void msi_domain_free_irqs_all(struct device *dev, unsigned int domid)
 	msi_unlock_descs(dev);
 }
 
+/**
+ * msi_device_domain_free_wired - Free a wired interrupt in @domain
+ * @domain:	The domain to free the interrupt on
+ * @virq:	The Linux interrupt number to free
+ *
+ * This is the counterpart of msi_device_domain_alloc_wired() for the
+ * weird wired to MSI converting domains.
+ */
+void msi_device_domain_free_wired(struct irq_domain *domain, unsigned int virq)
+{
+	struct msi_desc *desc = irq_get_msi_desc(virq);
+	struct device *dev = domain->dev;
+
+	if (WARN_ON_ONCE(!dev || !desc || domain->bus_token != DOMAIN_BUS_WIRED_TO_MSI))
+		return;
+
+	msi_lock_descs(dev);
+	if (!WARN_ON_ONCE(msi_get_device_domain(dev, MSI_DEFAULT_DOMAIN) != domain)) {
+		msi_domain_free_irqs_range_locked(dev, MSI_DEFAULT_DOMAIN, desc->msi_index,
+						  desc->msi_index);
+	}
+	msi_unlock_descs(dev);
+}
+
 /**
  * msi_get_domain_info - Get the MSI interrupt domain info for @domain
  * @domain:	The interrupt domain to retrieve data from
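Note on the irq_sim.c hunk above: the reworked irq_domain_create_sim() relies on the scope-based cleanup helpers from <linux/cleanup.h>, where __free(kfree) / __free(bitmap) arrange for an annotated pointer to be freed automatically when it goes out of scope, and no_free_ptr() detaches the pointer once ownership has been handed over. The following is only a rough user-space sketch of that idiom, built directly on the GCC/Clang cleanup attribute the kernel macros wrap; the names free_charp, __free_ptr and take_ptr are illustrative stand-ins, not kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Stand-in for DEFINE_FREE(): cleanup handler receives a pointer to the variable. */
static void free_charp(char **p)
{
	free(*p);
}
#define __free_ptr __attribute__((cleanup(free_charp)))

/* Stand-in for no_free_ptr(): steal the pointer so the cleanup frees NULL. */
static char *take_ptr(char **p)
{
	char *ret = *p;
	*p = NULL;
	return ret;
}

/*
 * Mirrors the shape of the new irq_domain_create_sim(): every early return
 * releases the half-built object automatically; only the success path
 * detaches it from the cleanup scope.
 */
static char *build_greeting(const char *name)
{
	char *buf __free_ptr = malloc(64);

	if (!buf)
		return NULL;
	if (!name)
		return NULL;	/* buf is freed here automatically */

	snprintf(buf, 64, "hello, %s", name);
	return take_ptr(&buf);	/* like no_free_ptr(work_ctx)->domain */
}

int main(void)
{
	char *msg = build_greeting("irq_sim");

	if (msg) {
		puts(msg);
		free(msg);
	}
	return 0;
}

The payoff, as in the patch, is that the error unwinding labels (err_free_bitmap, err_free_work_ctx, err_out) disappear: each failure path is a plain return and the compiler-inserted cleanups keep the function leak-free.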
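Note on the matrix.c hunk: the fixed IRQ_MATRIX_SIZE arrays are replaced by one runtime-sized trailing flexible array per structure, with the second bitmap expressed as a pointer into the upper half of that array, so each object needs a single allocation sized with struct_size(). Below is a minimal user-space sketch of that layout under stated assumptions: struct two_maps, two_maps_alloc() and the BITS_TO_LONGS macro are made-up illustrations, and calloc() stands in for the kernel's kzalloc()/__alloc_percpu().

#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG		(8 * sizeof(unsigned long))
#define BITS_TO_LONGS(nr)	(((nr) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* Miniature of struct irq_matrix / struct cpumap after the change:
 * one trailing array holds both bitmaps back to back. */
struct two_maps {
	unsigned int	bits;
	unsigned long	*system_map;	/* aimed at the upper half of scratch_map[] */
	unsigned long	scratch_map[];	/* 2 * BITS_TO_LONGS(bits) longs */
};

static struct two_maps *two_maps_alloc(unsigned int bits)
{
	unsigned int size = BITS_TO_LONGS(bits);
	struct two_maps *m;

	/* open-coded equivalent of kzalloc(struct_size(m, scratch_map, size * 2)) */
	m = calloc(1, sizeof(*m) + 2 * size * sizeof(unsigned long));
	if (!m)
		return NULL;

	m->bits = bits;
	m->system_map = &m->scratch_map[size];	/* second bitmap, same allocation */
	return m;
}

int main(void)
{
	struct two_maps *m = two_maps_alloc(200);

	if (!m)
		return 1;
	m->scratch_map[0] = 0x1;	/* first bitmap */
	m->system_map[0]  = 0x2;	/* second bitmap */
	printf("%lx %lx\n", m->scratch_map[0], m->system_map[0]);
	free(m);
	return 0;
}

The same trick is applied per CPU in irq_alloc_matrix(): the per-CPU cpumap is allocated with room for two bitmaps in alloc_map[], and managed_map is pointed at &cm->alloc_map[matrix_size].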