author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:40:19 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:40:19 +0000
commit    9f0fc191371843c4fc000a226b0a26b6c059aacd (patch)
tree      35f8be3ef04506ac891ad001e8c41e535ae8d01d /drivers/xen
parent    Releasing progress-linux version 6.6.15-2~progress7.99u1. (diff)
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/xen')
-rw-r--r--  drivers/xen/Kconfig                        |   8
-rw-r--r--  drivers/xen/balloon.c                      |   1
-rw-r--r--  drivers/xen/events/events_2l.c             |   8
-rw-r--r--  drivers/xen/events/events_base.c           | 571
-rw-r--r--  drivers/xen/events/events_fifo.c           |  26
-rw-r--r--  drivers/xen/events/events_internal.h       |   1
-rw-r--r--  drivers/xen/evtchn.c                       |   2
-rw-r--r--  drivers/xen/gntdev-dmabuf.c                |  50
-rw-r--r--  drivers/xen/grant-table.c                  |  10
-rw-r--r--  drivers/xen/privcmd.c                      | 405
-rw-r--r--  drivers/xen/xen-front-pgdir-shbuf.c        |  34
-rw-r--r--  drivers/xen/xen-scsiback.c                 |   3
-rw-r--r--  drivers/xen/xenbus/xenbus_dev_frontend.c   |   4
-rw-r--r--  drivers/xen/xenbus/xenbus_probe_backend.c  |  17
14 files changed, 762 insertions, 378 deletions
diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig
index d43153fec1..d5989871dd 100644
--- a/drivers/xen/Kconfig
+++ b/drivers/xen/Kconfig
@@ -269,12 +269,12 @@ config XEN_PRIVCMD
disaggregated Xen setups this driver might be needed for other
domains, too.
-config XEN_PRIVCMD_IRQFD
- bool "Xen irqfd support"
+config XEN_PRIVCMD_EVENTFD
+ bool "Xen Ioeventfd and irqfd support"
depends on XEN_PRIVCMD && XEN_VIRTIO && EVENTFD
help
- Using the irqfd mechanism a virtio backend running in a daemon can
- speed up interrupt injection into a guest.
+ Using the ioeventfd / irqfd mechanism a virtio backend running in a
+ daemon can speed up interrupt delivery from / to a guest.
config XEN_ACPI_PROCESSOR
tristate "Xen ACPI processor"
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 586a167345..976c6cdf9e 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -94,7 +94,6 @@ static struct ctl_table balloon_table[] = {
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
- { }
};
#else
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index b8f2f971c2..e3585330cf 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -171,11 +171,11 @@ static void evtchn_2l_handle_events(unsigned cpu, struct evtchn_loop_ctrl *ctrl)
int i;
struct shared_info *s = HYPERVISOR_shared_info;
struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
+ evtchn_port_t evtchn;
/* Timer interrupt has highest priority. */
- irq = irq_from_virq(cpu, VIRQ_TIMER);
+ irq = irq_evtchn_from_virq(cpu, VIRQ_TIMER, &evtchn);
if (irq != -1) {
- evtchn_port_t evtchn = evtchn_from_irq(irq);
word_idx = evtchn / BITS_PER_LONG;
bit_idx = evtchn % BITS_PER_LONG;
if (active_evtchns(cpu, s, word_idx) & (1ULL << bit_idx))
@@ -328,9 +328,9 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
for (i = 0; i < EVTCHN_2L_NR_CHANNELS; i++) {
if (sync_test_bit(i, BM(sh->evtchn_pending))) {
int word_idx = i / BITS_PER_EVTCHN_WORD;
- printk(" %d: event %d -> irq %d%s%s%s\n",
+ printk(" %d: event %d -> irq %u%s%s%s\n",
cpu_from_evtchn(i), i,
- get_evtchn_to_irq(i),
+ irq_from_evtchn(i),
sync_test_bit(word_idx, BM(&v->evtchn_pending_sel))
? "" : " l2-clear",
!sync_test_bit(i, BM(sh->evtchn_mask))
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index c50419638a..3b9f080109 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -174,7 +174,7 @@ static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
-static bool (*pirq_needs_eoi)(unsigned irq);
+static bool (*pirq_needs_eoi)(struct irq_info *info);
#define EVTCHN_ROW(e) (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e) (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
@@ -190,7 +190,6 @@ static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
-static void disable_dynirq(struct irq_data *data);
static DEFINE_PER_CPU(unsigned int, irq_epoch);
@@ -248,15 +247,6 @@ static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
return 0;
}
-int get_evtchn_to_irq(evtchn_port_t evtchn)
-{
- if (evtchn >= xen_evtchn_max_channels())
- return -1;
- if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
- return -1;
- return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
-}
-
/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
@@ -274,6 +264,19 @@ static void set_info_for_irq(unsigned int irq, struct irq_info *info)
irq_set_chip_data(irq, info);
}
+static struct irq_info *evtchn_to_info(evtchn_port_t evtchn)
+{
+ int irq;
+
+ if (evtchn >= xen_evtchn_max_channels())
+ return NULL;
+ if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
+ return NULL;
+ irq = READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
+
+ return (irq < 0) ? NULL : info_for_irq(irq);
+}
+
/* Per CPU channel accounting */
static void channels_on_cpu_dec(struct irq_info *info)
{
@@ -300,6 +303,13 @@ static void channels_on_cpu_inc(struct irq_info *info)
info->is_accounted = 1;
}
+static void xen_irq_free_desc(unsigned int irq)
+{
+ /* Legacy IRQ descriptors are managed by the arch. */
+ if (irq >= nr_legacy_irqs())
+ irq_free_desc(irq);
+}
+
static void delayed_free_irq(struct work_struct *work)
{
struct irq_info *info = container_of(to_rcu_work(work), struct irq_info,
@@ -311,14 +321,11 @@ static void delayed_free_irq(struct work_struct *work)
kfree(info);
- /* Legacy IRQ descriptors are managed by the arch. */
- if (irq >= nr_legacy_irqs())
- irq_free_desc(irq);
+ xen_irq_free_desc(irq);
}
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
- unsigned irq,
enum xen_irq_type type,
evtchn_port_t evtchn,
unsigned short cpu)
@@ -328,29 +335,27 @@ static int xen_irq_info_common_setup(struct irq_info *info,
BUG_ON(info->type != IRQT_UNBOUND && info->type != type);
info->type = type;
- info->irq = irq;
info->evtchn = evtchn;
info->cpu = cpu;
info->mask_reason = EVT_MASK_REASON_EXPLICIT;
raw_spin_lock_init(&info->lock);
- ret = set_evtchn_to_irq(evtchn, irq);
+ ret = set_evtchn_to_irq(evtchn, info->irq);
if (ret < 0)
return ret;
- irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);
+ irq_clear_status_flags(info->irq, IRQ_NOREQUEST | IRQ_NOAUTOEN);
return xen_evtchn_port_setup(evtchn);
}
-static int xen_irq_info_evtchn_setup(unsigned irq,
+static int xen_irq_info_evtchn_setup(struct irq_info *info,
evtchn_port_t evtchn,
struct xenbus_device *dev)
{
- struct irq_info *info = info_for_irq(irq);
int ret;
- ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
+ ret = xen_irq_info_common_setup(info, IRQT_EVTCHN, evtchn, 0);
info->u.interdomain = dev;
if (dev)
atomic_inc(&dev->event_channels);
@@ -358,50 +363,37 @@ static int xen_irq_info_evtchn_setup(unsigned irq,
return ret;
}
-static int xen_irq_info_ipi_setup(unsigned cpu,
- unsigned irq,
- evtchn_port_t evtchn,
- enum ipi_vector ipi)
+static int xen_irq_info_ipi_setup(struct irq_info *info, unsigned int cpu,
+ evtchn_port_t evtchn, enum ipi_vector ipi)
{
- struct irq_info *info = info_for_irq(irq);
-
info->u.ipi = ipi;
- per_cpu(ipi_to_irq, cpu)[ipi] = irq;
+ per_cpu(ipi_to_irq, cpu)[ipi] = info->irq;
per_cpu(ipi_to_evtchn, cpu)[ipi] = evtchn;
- return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
+ return xen_irq_info_common_setup(info, IRQT_IPI, evtchn, 0);
}
-static int xen_irq_info_virq_setup(unsigned cpu,
- unsigned irq,
- evtchn_port_t evtchn,
- unsigned virq)
+static int xen_irq_info_virq_setup(struct irq_info *info, unsigned int cpu,
+ evtchn_port_t evtchn, unsigned int virq)
{
- struct irq_info *info = info_for_irq(irq);
-
info->u.virq = virq;
- per_cpu(virq_to_irq, cpu)[virq] = irq;
+ per_cpu(virq_to_irq, cpu)[virq] = info->irq;
- return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
+ return xen_irq_info_common_setup(info, IRQT_VIRQ, evtchn, 0);
}
-static int xen_irq_info_pirq_setup(unsigned irq,
- evtchn_port_t evtchn,
- unsigned pirq,
- unsigned gsi,
- uint16_t domid,
- unsigned char flags)
+static int xen_irq_info_pirq_setup(struct irq_info *info, evtchn_port_t evtchn,
+ unsigned int pirq, unsigned int gsi,
+ uint16_t domid, unsigned char flags)
{
- struct irq_info *info = info_for_irq(irq);
-
info->u.pirq.pirq = pirq;
info->u.pirq.gsi = gsi;
info->u.pirq.domid = domid;
info->u.pirq.flags = flags;
- return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
+ return xen_irq_info_common_setup(info, IRQT_PIRQ, evtchn, 0);
}
static void xen_irq_info_cleanup(struct irq_info *info)
@@ -415,7 +407,7 @@ static void xen_irq_info_cleanup(struct irq_info *info)
/*
* Accessors for packed IRQ information.
*/
-evtchn_port_t evtchn_from_irq(unsigned irq)
+static evtchn_port_t evtchn_from_irq(unsigned int irq)
{
const struct irq_info *info = NULL;
@@ -429,64 +421,51 @@ evtchn_port_t evtchn_from_irq(unsigned irq)
unsigned int irq_from_evtchn(evtchn_port_t evtchn)
{
- return get_evtchn_to_irq(evtchn);
+ struct irq_info *info = evtchn_to_info(evtchn);
+
+ return info ? info->irq : -1;
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
-int irq_from_virq(unsigned int cpu, unsigned int virq)
+int irq_evtchn_from_virq(unsigned int cpu, unsigned int virq,
+ evtchn_port_t *evtchn)
{
- return per_cpu(virq_to_irq, cpu)[virq];
+ int irq = per_cpu(virq_to_irq, cpu)[virq];
+
+ *evtchn = evtchn_from_irq(irq);
+
+ return irq;
}
-static enum ipi_vector ipi_from_irq(unsigned irq)
+static enum ipi_vector ipi_from_irq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
-
BUG_ON(info == NULL);
BUG_ON(info->type != IRQT_IPI);
return info->u.ipi;
}
-static unsigned virq_from_irq(unsigned irq)
+static unsigned int virq_from_irq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
-
BUG_ON(info == NULL);
BUG_ON(info->type != IRQT_VIRQ);
return info->u.virq;
}
-static unsigned pirq_from_irq(unsigned irq)
+static unsigned int pirq_from_irq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
-
BUG_ON(info == NULL);
BUG_ON(info->type != IRQT_PIRQ);
return info->u.pirq.pirq;
}
-static enum xen_irq_type type_from_irq(unsigned irq)
-{
- return info_for_irq(irq)->type;
-}
-
-static unsigned cpu_from_irq(unsigned irq)
-{
- return info_for_irq(irq)->cpu;
-}
-
unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
{
- int irq = get_evtchn_to_irq(evtchn);
- unsigned ret = 0;
-
- if (irq != -1)
- ret = cpu_from_irq(irq);
+ struct irq_info *info = evtchn_to_info(evtchn);
- return ret;
+ return info ? info->cpu : 0;
}
static void do_mask(struct irq_info *info, u8 reason)
@@ -518,36 +497,30 @@ static void do_unmask(struct irq_info *info, u8 reason)
}
#ifdef CONFIG_X86
-static bool pirq_check_eoi_map(unsigned irq)
+static bool pirq_check_eoi_map(struct irq_info *info)
{
- return test_bit(pirq_from_irq(irq), pirq_eoi_map);
+ return test_bit(pirq_from_irq(info), pirq_eoi_map);
}
#endif
-static bool pirq_needs_eoi_flag(unsigned irq)
+static bool pirq_needs_eoi_flag(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
BUG_ON(info->type != IRQT_PIRQ);
return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}
-static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
+static void bind_evtchn_to_cpu(struct irq_info *info, unsigned int cpu,
bool force_affinity)
{
- int irq = get_evtchn_to_irq(evtchn);
- struct irq_info *info = info_for_irq(irq);
-
- BUG_ON(irq == -1);
-
if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
- struct irq_data *data = irq_get_irq_data(irq);
+ struct irq_data *data = irq_get_irq_data(info->irq);
irq_data_update_affinity(data, cpumask_of(cpu));
irq_data_update_effective_affinity(data, cpumask_of(cpu));
}
- xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);
+ xen_evtchn_port_bind_to_cpu(info->evtchn, cpu, info->cpu);
channels_on_cpu_dec(info);
info->cpu = cpu;
@@ -737,50 +710,49 @@ void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
-static void xen_irq_init(unsigned irq)
+static struct irq_info *xen_irq_init(unsigned int irq)
{
struct irq_info *info;
info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (info == NULL)
- panic("Unable to allocate metadata for IRQ%d\n", irq);
+ if (info) {
+ info->irq = irq;
+ info->type = IRQT_UNBOUND;
+ info->refcnt = -1;
+ INIT_RCU_WORK(&info->rwork, delayed_free_irq);
- info->type = IRQT_UNBOUND;
- info->refcnt = -1;
- INIT_RCU_WORK(&info->rwork, delayed_free_irq);
+ set_info_for_irq(irq, info);
+ /*
+ * Interrupt affinity setting can be immediate. No point
+ * in delaying it until an interrupt is handled.
+ */
+ irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
- set_info_for_irq(irq, info);
- /*
- * Interrupt affinity setting can be immediate. No point
- * in delaying it until an interrupt is handled.
- */
- irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
+ INIT_LIST_HEAD(&info->eoi_list);
+ list_add_tail(&info->list, &xen_irq_list_head);
+ }
- INIT_LIST_HEAD(&info->eoi_list);
- list_add_tail(&info->list, &xen_irq_list_head);
+ return info;
}
-static int __must_check xen_allocate_irqs_dynamic(int nvec)
+static struct irq_info *xen_allocate_irq_dynamic(void)
{
- int i, irq = irq_alloc_descs(-1, 0, nvec, -1);
+ int irq = irq_alloc_desc_from(0, -1);
+ struct irq_info *info = NULL;
if (irq >= 0) {
- for (i = 0; i < nvec; i++)
- xen_irq_init(irq + i);
+ info = xen_irq_init(irq);
+ if (!info)
+ xen_irq_free_desc(irq);
}
- return irq;
-}
-
-static inline int __must_check xen_allocate_irq_dynamic(void)
-{
-
- return xen_allocate_irqs_dynamic(1);
+ return info;
}
-static int __must_check xen_allocate_irq_gsi(unsigned gsi)
+static struct irq_info *xen_allocate_irq_gsi(unsigned int gsi)
{
int irq;
+ struct irq_info *info;
/*
* A PV guest has no concept of a GSI (since it has no ACPI
@@ -797,15 +769,15 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi)
else
irq = irq_alloc_desc_at(gsi, -1);
- xen_irq_init(irq);
+ info = xen_irq_init(irq);
+ if (!info)
+ xen_irq_free_desc(irq);
- return irq;
+ return info;
}
-static void xen_free_irq(unsigned irq)
+static void xen_free_irq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(irq);
-
if (WARN_ON(!info))
return;
@@ -826,14 +798,11 @@ static void event_handler_exit(struct irq_info *info)
clear_evtchn(info->evtchn);
}
-static void pirq_query_unmask(int irq)
+static void pirq_query_unmask(struct irq_info *info)
{
struct physdev_irq_status_query irq_status;
- struct irq_info *info = info_for_irq(irq);
- BUG_ON(info->type != IRQT_PIRQ);
-
- irq_status.irq = pirq_from_irq(irq);
+ irq_status.irq = pirq_from_irq(info);
if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
irq_status.flags = 0;
@@ -842,61 +811,81 @@ static void pirq_query_unmask(int irq)
info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}
-static void eoi_pirq(struct irq_data *data)
+static void do_eoi_pirq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(data->irq);
- evtchn_port_t evtchn = info ? info->evtchn : 0;
- struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
+ struct physdev_eoi eoi = { .irq = pirq_from_irq(info) };
int rc = 0;
- if (!VALID_EVTCHN(evtchn))
+ if (!VALID_EVTCHN(info->evtchn))
return;
event_handler_exit(info);
- if (pirq_needs_eoi(data->irq)) {
+ if (pirq_needs_eoi(info)) {
rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
WARN_ON(rc);
}
}
+static void eoi_pirq(struct irq_data *data)
+{
+ struct irq_info *info = info_for_irq(data->irq);
+
+ do_eoi_pirq(info);
+}
+
+static void do_disable_dynirq(struct irq_info *info)
+{
+ if (VALID_EVTCHN(info->evtchn))
+ do_mask(info, EVT_MASK_REASON_EXPLICIT);
+}
+
+static void disable_dynirq(struct irq_data *data)
+{
+ struct irq_info *info = info_for_irq(data->irq);
+
+ if (info)
+ do_disable_dynirq(info);
+}
+
static void mask_ack_pirq(struct irq_data *data)
{
- disable_dynirq(data);
- eoi_pirq(data);
+ struct irq_info *info = info_for_irq(data->irq);
+
+ if (info) {
+ do_disable_dynirq(info);
+ do_eoi_pirq(info);
+ }
}
-static unsigned int __startup_pirq(unsigned int irq)
+static unsigned int __startup_pirq(struct irq_info *info)
{
struct evtchn_bind_pirq bind_pirq;
- struct irq_info *info = info_for_irq(irq);
- evtchn_port_t evtchn = evtchn_from_irq(irq);
+ evtchn_port_t evtchn = info->evtchn;
int rc;
- BUG_ON(info->type != IRQT_PIRQ);
-
if (VALID_EVTCHN(evtchn))
goto out;
- bind_pirq.pirq = pirq_from_irq(irq);
+ bind_pirq.pirq = pirq_from_irq(info);
/* NB. We are happy to share unless we are probing. */
bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
BIND_PIRQ__WILL_SHARE : 0;
rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
if (rc != 0) {
- pr_warn("Failed to obtain physical IRQ %d\n", irq);
+ pr_warn("Failed to obtain physical IRQ %d\n", info->irq);
return 0;
}
evtchn = bind_pirq.port;
- pirq_query_unmask(irq);
+ pirq_query_unmask(info);
- rc = set_evtchn_to_irq(evtchn, irq);
+ rc = set_evtchn_to_irq(evtchn, info->irq);
if (rc)
goto err;
info->evtchn = evtchn;
- bind_evtchn_to_cpu(evtchn, 0, false);
+ bind_evtchn_to_cpu(info, 0, false);
rc = xen_evtchn_port_setup(evtchn);
if (rc)
@@ -905,26 +894,28 @@ static unsigned int __startup_pirq(unsigned int irq)
out:
do_unmask(info, EVT_MASK_REASON_EXPLICIT);
- eoi_pirq(irq_get_irq_data(irq));
+ do_eoi_pirq(info);
return 0;
err:
- pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
+ pr_err("irq%d: Failed to set port to irq mapping (%d)\n", info->irq,
+ rc);
xen_evtchn_close(evtchn);
return 0;
}
static unsigned int startup_pirq(struct irq_data *data)
{
- return __startup_pirq(data->irq);
+ struct irq_info *info = info_for_irq(data->irq);
+
+ return __startup_pirq(info);
}
static void shutdown_pirq(struct irq_data *data)
{
- unsigned int irq = data->irq;
- struct irq_info *info = info_for_irq(irq);
- evtchn_port_t evtchn = evtchn_from_irq(irq);
+ struct irq_info *info = info_for_irq(data->irq);
+ evtchn_port_t evtchn = info->evtchn;
BUG_ON(info->type != IRQT_PIRQ);
@@ -932,8 +923,8 @@ static void shutdown_pirq(struct irq_data *data)
return;
do_mask(info, EVT_MASK_REASON_EXPLICIT);
- xen_evtchn_close(evtchn);
xen_irq_info_cleanup(info);
+ xen_evtchn_close(evtchn);
}
static void enable_pirq(struct irq_data *data)
@@ -962,10 +953,15 @@ int xen_irq_from_gsi(unsigned gsi)
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
-static void __unbind_from_irq(unsigned int irq)
+static void __unbind_from_irq(struct irq_info *info, unsigned int irq)
{
- evtchn_port_t evtchn = evtchn_from_irq(irq);
- struct irq_info *info = info_for_irq(irq);
+ evtchn_port_t evtchn;
+ bool close_evtchn = false;
+
+ if (!info) {
+ xen_irq_free_desc(irq);
+ return;
+ }
if (info->refcnt > 0) {
info->refcnt--;
@@ -973,20 +969,22 @@ static void __unbind_from_irq(unsigned int irq)
return;
}
+ evtchn = info->evtchn;
+
if (VALID_EVTCHN(evtchn)) {
- unsigned int cpu = cpu_from_irq(irq);
+ unsigned int cpu = info->cpu;
struct xenbus_device *dev;
if (!info->is_static)
- xen_evtchn_close(evtchn);
+ close_evtchn = true;
- switch (type_from_irq(irq)) {
+ switch (info->type) {
case IRQT_VIRQ:
- per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
+ per_cpu(virq_to_irq, cpu)[virq_from_irq(info)] = -1;
break;
case IRQT_IPI:
- per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
- per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(irq)] = 0;
+ per_cpu(ipi_to_irq, cpu)[ipi_from_irq(info)] = -1;
+ per_cpu(ipi_to_evtchn, cpu)[ipi_from_irq(info)] = 0;
break;
case IRQT_EVTCHN:
dev = info->u.interdomain;
@@ -998,9 +996,12 @@ static void __unbind_from_irq(unsigned int irq)
}
xen_irq_info_cleanup(info);
+
+ if (close_evtchn)
+ xen_evtchn_close(evtchn);
}
- xen_free_irq(irq);
+ xen_free_irq(info);
}
/*
@@ -1016,24 +1017,24 @@ static void __unbind_from_irq(unsigned int irq)
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
unsigned pirq, int shareable, char *name)
{
- int irq;
+ struct irq_info *info;
struct physdev_irq irq_op;
int ret;
mutex_lock(&irq_mapping_update_lock);
- irq = xen_irq_from_gsi(gsi);
- if (irq != -1) {
+ ret = xen_irq_from_gsi(gsi);
+ if (ret != -1) {
pr_info("%s: returning irq %d for gsi %u\n",
- __func__, irq, gsi);
+ __func__, ret, gsi);
goto out;
}
- irq = xen_allocate_irq_gsi(gsi);
- if (irq < 0)
+ info = xen_allocate_irq_gsi(gsi);
+ if (!info)
goto out;
- irq_op.irq = irq;
+ irq_op.irq = info->irq;
irq_op.vector = 0;
/* Only the privileged domain can do this. For non-priv, the pcifront
@@ -1041,20 +1042,19 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
* this in the priv domain. */
if (xen_initial_domain() &&
HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
- xen_free_irq(irq);
- irq = -ENOSPC;
+ xen_free_irq(info);
+ ret = -ENOSPC;
goto out;
}
- ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
+ ret = xen_irq_info_pirq_setup(info, 0, pirq, gsi, DOMID_SELF,
shareable ? PIRQ_SHAREABLE : 0);
if (ret < 0) {
- __unbind_from_irq(irq);
- irq = ret;
+ __unbind_from_irq(info, info->irq);
goto out;
}
- pirq_query_unmask(irq);
+ pirq_query_unmask(info);
/* We try to use the handler with the appropriate semantic for the
* type of interrupt: if the interrupt is an edge triggered
* interrupt we use handle_edge_irq.
@@ -1071,16 +1071,18 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
* is the right choice either way.
*/
if (shareable)
- irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
handle_fasteoi_irq, name);
else
- irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_pirq_chip,
handle_edge_irq, name);
+ ret = info->irq;
+
out:
mutex_unlock(&irq_mapping_update_lock);
- return irq;
+ return ret;
}
#ifdef CONFIG_PCI_MSI
@@ -1102,17 +1104,24 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
int pirq, int nvec, const char *name, domid_t domid)
{
int i, irq, ret;
+ struct irq_info *info;
mutex_lock(&irq_mapping_update_lock);
- irq = xen_allocate_irqs_dynamic(nvec);
+ irq = irq_alloc_descs(-1, 0, nvec, -1);
if (irq < 0)
goto out;
for (i = 0; i < nvec; i++) {
+ info = xen_irq_init(irq + i);
+ if (!info) {
+ ret = -ENOMEM;
+ goto error_irq;
+ }
+
irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);
- ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
+ ret = xen_irq_info_pirq_setup(info, 0, pirq + i, 0, domid,
i == 0 ? 0 : PIRQ_MSI_GROUP);
if (ret < 0)
goto error_irq;
@@ -1124,9 +1133,12 @@ int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
out:
mutex_unlock(&irq_mapping_update_lock);
return irq;
+
error_irq:
- while (nvec--)
- __unbind_from_irq(irq + nvec);
+ while (nvec--) {
+ info = info_for_irq(irq + nvec);
+ __unbind_from_irq(info, irq + nvec);
+ }
mutex_unlock(&irq_mapping_update_lock);
return ret;
}
@@ -1162,67 +1174,45 @@ int xen_destroy_irq(int irq)
}
}
- xen_free_irq(irq);
+ xen_free_irq(info);
out:
mutex_unlock(&irq_mapping_update_lock);
return rc;
}
-int xen_irq_from_pirq(unsigned pirq)
-{
- int irq;
-
- struct irq_info *info;
-
- mutex_lock(&irq_mapping_update_lock);
-
- list_for_each_entry(info, &xen_irq_list_head, list) {
- if (info->type != IRQT_PIRQ)
- continue;
- irq = info->irq;
- if (info->u.pirq.pirq == pirq)
- goto out;
- }
- irq = -1;
-out:
- mutex_unlock(&irq_mapping_update_lock);
-
- return irq;
-}
-
-
int xen_pirq_from_irq(unsigned irq)
{
- return pirq_from_irq(irq);
+ struct irq_info *info = info_for_irq(irq);
+
+ return pirq_from_irq(info);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
struct xenbus_device *dev)
{
- int irq;
- int ret;
+ int ret = -ENOMEM;
+ struct irq_info *info;
if (evtchn >= xen_evtchn_max_channels())
return -ENOMEM;
mutex_lock(&irq_mapping_update_lock);
- irq = get_evtchn_to_irq(evtchn);
+ info = evtchn_to_info(evtchn);
- if (irq == -1) {
- irq = xen_allocate_irq_dynamic();
- if (irq < 0)
+ if (!info) {
+ info = xen_allocate_irq_dynamic();
+ if (!info)
goto out;
- irq_set_chip_and_handler_name(irq, chip,
+ irq_set_chip_and_handler_name(info->irq, chip,
handle_edge_irq, "event");
- ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
+ ret = xen_irq_info_evtchn_setup(info, evtchn, dev);
if (ret < 0) {
- __unbind_from_irq(irq);
- irq = ret;
+ __unbind_from_irq(info, info->irq);
goto out;
}
/*
@@ -1232,16 +1222,17 @@ static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
* affinity setting is not invoked on them so nothing would
* bind the channel.
*/
- bind_evtchn_to_cpu(evtchn, 0, false);
- } else {
- struct irq_info *info = info_for_irq(irq);
- WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
+ bind_evtchn_to_cpu(info, 0, false);
+ } else if (!WARN_ON(info->type != IRQT_EVTCHN)) {
+ info->refcnt++;
}
+ ret = info->irq;
+
out:
mutex_unlock(&irq_mapping_update_lock);
- return irq;
+ return ret;
}
int bind_evtchn_to_irq(evtchn_port_t evtchn)
@@ -1260,18 +1251,19 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
evtchn_port_t evtchn;
- int ret, irq;
+ struct irq_info *info;
+ int ret;
mutex_lock(&irq_mapping_update_lock);
- irq = per_cpu(ipi_to_irq, cpu)[ipi];
+ ret = per_cpu(ipi_to_irq, cpu)[ipi];
- if (irq == -1) {
- irq = xen_allocate_irq_dynamic();
- if (irq < 0)
+ if (ret == -1) {
+ info = xen_allocate_irq_dynamic();
+ if (!info)
goto out;
- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
handle_percpu_irq, "ipi");
bind_ipi.vcpu = xen_vcpu_nr(cpu);
@@ -1280,25 +1272,25 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
BUG();
evtchn = bind_ipi.port;
- ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+ ret = xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
if (ret < 0) {
- __unbind_from_irq(irq);
- irq = ret;
+ __unbind_from_irq(info, info->irq);
goto out;
}
/*
* Force the affinity mask to the target CPU so proc shows
* the correct target.
*/
- bind_evtchn_to_cpu(evtchn, cpu, true);
+ bind_evtchn_to_cpu(info, cpu, true);
+ ret = info->irq;
} else {
- struct irq_info *info = info_for_irq(irq);
+ info = info_for_irq(ret);
WARN_ON(info == NULL || info->type != IRQT_IPI);
}
out:
mutex_unlock(&irq_mapping_update_lock);
- return irq;
+ return ret;
}
static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
@@ -1366,22 +1358,23 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
struct evtchn_bind_virq bind_virq;
evtchn_port_t evtchn = 0;
- int irq, ret;
+ struct irq_info *info;
+ int ret;
mutex_lock(&irq_mapping_update_lock);
- irq = per_cpu(virq_to_irq, cpu)[virq];
+ ret = per_cpu(virq_to_irq, cpu)[virq];
- if (irq == -1) {
- irq = xen_allocate_irq_dynamic();
- if (irq < 0)
+ if (ret == -1) {
+ info = xen_allocate_irq_dynamic();
+ if (!info)
goto out;
if (percpu)
- irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_percpu_chip,
handle_percpu_irq, "virq");
else
- irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+ irq_set_chip_and_handler_name(info->irq, &xen_dynamic_chip,
handle_edge_irq, "virq");
bind_virq.virq = virq;
@@ -1396,10 +1389,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
BUG_ON(ret < 0);
}
- ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+ ret = xen_irq_info_virq_setup(info, cpu, evtchn, virq);
if (ret < 0) {
- __unbind_from_irq(irq);
- irq = ret;
+ __unbind_from_irq(info, info->irq);
goto out;
}
@@ -1407,22 +1399,26 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
* Force the affinity mask for percpu interrupts so proc
* shows the correct target.
*/
- bind_evtchn_to_cpu(evtchn, cpu, percpu);
+ bind_evtchn_to_cpu(info, cpu, percpu);
+ ret = info->irq;
} else {
- struct irq_info *info = info_for_irq(irq);
+ info = info_for_irq(ret);
WARN_ON(info == NULL || info->type != IRQT_VIRQ);
}
out:
mutex_unlock(&irq_mapping_update_lock);
- return irq;
+ return ret;
}
static void unbind_from_irq(unsigned int irq)
{
+ struct irq_info *info;
+
mutex_lock(&irq_mapping_update_lock);
- __unbind_from_irq(irq);
+ info = info_for_irq(irq);
+ __unbind_from_irq(info, irq);
mutex_unlock(&irq_mapping_update_lock);
}
@@ -1573,13 +1569,7 @@ EXPORT_SYMBOL_GPL(xen_set_irq_priority);
int evtchn_make_refcounted(evtchn_port_t evtchn, bool is_static)
{
- int irq = get_evtchn_to_irq(evtchn);
- struct irq_info *info;
-
- if (irq == -1)
- return -ENOENT;
-
- info = info_for_irq(irq);
+ struct irq_info *info = evtchn_to_info(evtchn);
if (!info)
return -ENOENT;
@@ -1595,7 +1585,6 @@ EXPORT_SYMBOL_GPL(evtchn_make_refcounted);
int evtchn_get(evtchn_port_t evtchn)
{
- int irq;
struct irq_info *info;
int err = -ENOENT;
@@ -1604,11 +1593,7 @@ int evtchn_get(evtchn_port_t evtchn)
mutex_lock(&irq_mapping_update_lock);
- irq = get_evtchn_to_irq(evtchn);
- if (irq == -1)
- goto done;
-
- info = info_for_irq(irq);
+ info = evtchn_to_info(evtchn);
if (!info)
goto done;
@@ -1628,10 +1613,11 @@ EXPORT_SYMBOL_GPL(evtchn_get);
void evtchn_put(evtchn_port_t evtchn)
{
- int irq = get_evtchn_to_irq(evtchn);
- if (WARN_ON(irq == -1))
+ struct irq_info *info = evtchn_to_info(evtchn);
+
+ if (WARN_ON(!info))
return;
- unbind_from_irq(irq);
+ unbind_from_irq(info->irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);
@@ -1661,12 +1647,10 @@ struct evtchn_loop_ctrl {
void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
{
- int irq;
- struct irq_info *info;
+ struct irq_info *info = evtchn_to_info(port);
struct xenbus_device *dev;
- irq = get_evtchn_to_irq(port);
- if (irq == -1)
+ if (!info)
return;
/*
@@ -1691,7 +1675,6 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
}
}
- info = info_for_irq(irq);
if (xchg_acquire(&info->is_active, 1))
return;
@@ -1705,7 +1688,7 @@ void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
info->eoi_time = get_jiffies_64() + event_eoi_delay;
}
- generic_handle_irq(irq);
+ generic_handle_irq(info->irq);
}
int xen_evtchn_do_upcall(void)
@@ -1763,16 +1746,17 @@ void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
mutex_lock(&irq_mapping_update_lock);
/* After resume the irq<->evtchn mappings are all cleared out */
- BUG_ON(get_evtchn_to_irq(evtchn) != -1);
+ BUG_ON(evtchn_to_info(evtchn));
/* Expect irq to have been bound before,
so there should be a proper type */
BUG_ON(info->type == IRQT_UNBOUND);
- (void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);
+ info->irq = irq;
+ (void)xen_irq_info_evtchn_setup(info, evtchn, NULL);
mutex_unlock(&irq_mapping_update_lock);
- bind_evtchn_to_cpu(evtchn, info->cpu, false);
+ bind_evtchn_to_cpu(info, info->cpu, false);
/* Unmask the event channel. */
enable_irq(irq);
@@ -1806,7 +1790,7 @@ static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
* it, but don't do the xenlinux-level rebind in that case.
*/
if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
- bind_evtchn_to_cpu(evtchn, tcpu, false);
+ bind_evtchn_to_cpu(info, tcpu, false);
do_unmask(info, EVT_MASK_REASON_TEMPORARY);
@@ -1863,28 +1847,30 @@ static void enable_dynirq(struct irq_data *data)
do_unmask(info, EVT_MASK_REASON_EXPLICIT);
}
-static void disable_dynirq(struct irq_data *data)
+static void do_ack_dynirq(struct irq_info *info)
{
- struct irq_info *info = info_for_irq(data->irq);
- evtchn_port_t evtchn = info ? info->evtchn : 0;
+ evtchn_port_t evtchn = info->evtchn;
if (VALID_EVTCHN(evtchn))
- do_mask(info, EVT_MASK_REASON_EXPLICIT);
+ event_handler_exit(info);
}
static void ack_dynirq(struct irq_data *data)
{
struct irq_info *info = info_for_irq(data->irq);
- evtchn_port_t evtchn = info ? info->evtchn : 0;
- if (VALID_EVTCHN(evtchn))
- event_handler_exit(info);
+ if (info)
+ do_ack_dynirq(info);
}
static void mask_ack_dynirq(struct irq_data *data)
{
- disable_dynirq(data);
- ack_dynirq(data);
+ struct irq_info *info = info_for_irq(data->irq);
+
+ if (info) {
+ do_disable_dynirq(info);
+ do_ack_dynirq(info);
+ }
}
static void lateeoi_ack_dynirq(struct irq_data *data)
@@ -1957,13 +1943,13 @@ static void restore_pirqs(void)
if (rc) {
pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
gsi, irq, pirq, rc);
- xen_free_irq(irq);
+ xen_free_irq(info);
continue;
}
printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);
- __startup_pirq(irq);
+ __startup_pirq(info);
}
}
@@ -1971,13 +1957,15 @@ static void restore_cpu_virqs(unsigned int cpu)
{
struct evtchn_bind_virq bind_virq;
evtchn_port_t evtchn;
+ struct irq_info *info;
int virq, irq;
for (virq = 0; virq < NR_VIRQS; virq++) {
if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
continue;
+ info = info_for_irq(irq);
- BUG_ON(virq_from_irq(irq) != virq);
+ BUG_ON(virq_from_irq(info) != virq);
/* Get a new binding from Xen. */
bind_virq.virq = virq;
@@ -1988,9 +1976,9 @@ static void restore_cpu_virqs(unsigned int cpu)
evtchn = bind_virq.port;
/* Record the new mapping. */
- (void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
+ xen_irq_info_virq_setup(info, cpu, evtchn, virq);
/* The affinity mask is still valid */
- bind_evtchn_to_cpu(evtchn, cpu, false);
+ bind_evtchn_to_cpu(info, cpu, false);
}
}
@@ -1998,13 +1986,15 @@ static void restore_cpu_ipis(unsigned int cpu)
{
struct evtchn_bind_ipi bind_ipi;
evtchn_port_t evtchn;
+ struct irq_info *info;
int ipi, irq;
for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
continue;
+ info = info_for_irq(irq);
- BUG_ON(ipi_from_irq(irq) != ipi);
+ BUG_ON(ipi_from_irq(info) != ipi);
/* Get a new binding from Xen. */
bind_ipi.vcpu = xen_vcpu_nr(cpu);
@@ -2014,9 +2004,9 @@ static void restore_cpu_ipis(unsigned int cpu)
evtchn = bind_ipi.port;
/* Record the new mapping. */
- (void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
+ xen_irq_info_ipi_setup(info, cpu, evtchn, ipi);
/* The affinity mask is still valid */
- bind_evtchn_to_cpu(evtchn, cpu, false);
+ bind_evtchn_to_cpu(info, cpu, false);
}
}
@@ -2030,13 +2020,6 @@ void xen_clear_irq_pending(int irq)
event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
-void xen_set_irq_pending(int irq)
-{
- evtchn_port_t evtchn = evtchn_from_irq(irq);
-
- if (VALID_EVTCHN(evtchn))
- set_evtchn(evtchn);
-}
bool xen_test_irq_pending(int irq)
{
diff --git a/drivers/xen/events/events_fifo.c b/drivers/xen/events/events_fifo.c
index ad9fe51d3f..655775db7c 100644
--- a/drivers/xen/events/events_fifo.c
+++ b/drivers/xen/events/events_fifo.c
@@ -226,21 +226,20 @@ static bool evtchn_fifo_is_masked(evtchn_port_t port)
*/
static bool clear_masked_cond(volatile event_word_t *word)
{
- event_word_t new, old, w;
+ event_word_t new, old;
- w = *word;
+ old = *word;
do {
- if (!(w & (1 << EVTCHN_FIFO_MASKED)))
+ if (!(old & (1 << EVTCHN_FIFO_MASKED)))
return true;
- if (w & (1 << EVTCHN_FIFO_PENDING))
+ if (old & (1 << EVTCHN_FIFO_PENDING))
return false;
- old = w & ~(1 << EVTCHN_FIFO_BUSY);
+ old = old & ~(1 << EVTCHN_FIFO_BUSY);
new = old & ~(1 << EVTCHN_FIFO_MASKED);
- w = sync_cmpxchg(word, old, new);
- } while (w != old);
+ } while (!sync_try_cmpxchg(word, &old, new));
return true;
}
@@ -259,17 +258,16 @@ static void evtchn_fifo_unmask(evtchn_port_t port)
static uint32_t clear_linked(volatile event_word_t *word)
{
- event_word_t new, old, w;
+ event_word_t new, old;
- w = *word;
+ old = *word;
do {
- old = w;
- new = (w & ~((1 << EVTCHN_FIFO_LINKED)
- | EVTCHN_FIFO_LINK_MASK));
- } while ((w = sync_cmpxchg(word, old, new)) != old);
+ new = (old & ~((1 << EVTCHN_FIFO_LINKED)
+ | EVTCHN_FIFO_LINK_MASK));
+ } while (!sync_try_cmpxchg(word, &old, new));
- return w & EVTCHN_FIFO_LINK_MASK;
+ return old & EVTCHN_FIFO_LINK_MASK;
}
static void consume_one_event(unsigned cpu, struct evtchn_loop_ctrl *ctrl,
diff --git a/drivers/xen/events/events_internal.h b/drivers/xen/events/events_internal.h
index 4d3398eff9..19ae31695e 100644
--- a/drivers/xen/events/events_internal.h
+++ b/drivers/xen/events/events_internal.h
@@ -33,7 +33,6 @@ struct evtchn_ops {
extern const struct evtchn_ops *evtchn_ops;
-int get_evtchn_to_irq(evtchn_port_t evtchn);
void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl);
unsigned int cpu_from_evtchn(evtchn_port_t evtchn);
diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c
index 9139a7364d..59717628ca 100644
--- a/drivers/xen/evtchn.c
+++ b/drivers/xen/evtchn.c
@@ -397,7 +397,7 @@ static int evtchn_bind_to_user(struct per_user_data *u, evtchn_port_t port,
if (rc < 0)
goto err;
- rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, 0,
+ rc = bind_evtchn_to_irqhandler_lateeoi(port, evtchn_interrupt, IRQF_SHARED,
u->name, evtchn);
if (rc < 0)
goto err;
diff --git a/drivers/xen/gntdev-dmabuf.c b/drivers/xen/gntdev-dmabuf.c
index 4440e626b7..42adc2c1e0 100644
--- a/drivers/xen/gntdev-dmabuf.c
+++ b/drivers/xen/gntdev-dmabuf.c
@@ -11,6 +11,7 @@
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/dma-buf.h>
+#include <linux/dma-direct.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
@@ -50,7 +51,7 @@ struct gntdev_dmabuf {
/* Number of pages this buffer has. */
int nr_pages;
- /* Pages of this buffer. */
+ /* Pages of this buffer (only for dma-buf export). */
struct page **pages;
};
@@ -484,7 +485,7 @@ out:
/* DMA buffer import support. */
static int
-dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
+dmabuf_imp_grant_foreign_access(unsigned long *gfns, u32 *refs,
int count, int domid)
{
grant_ref_t priv_gref_head;
@@ -507,7 +508,7 @@ dmabuf_imp_grant_foreign_access(struct page **pages, u32 *refs,
}
gnttab_grant_foreign_access_ref(cur_ref, domid,
- xen_page_to_gfn(pages[i]), 0);
+ gfns[i], 0);
refs[i] = cur_ref;
}
@@ -529,7 +530,6 @@ static void dmabuf_imp_end_foreign_access(u32 *refs, int count)
static void dmabuf_imp_free_storage(struct gntdev_dmabuf *gntdev_dmabuf)
{
- kfree(gntdev_dmabuf->pages);
kfree(gntdev_dmabuf->u.imp.refs);
kfree(gntdev_dmabuf);
}
@@ -549,12 +549,6 @@ static struct gntdev_dmabuf *dmabuf_imp_alloc_storage(int count)
if (!gntdev_dmabuf->u.imp.refs)
goto fail;
- gntdev_dmabuf->pages = kcalloc(count,
- sizeof(gntdev_dmabuf->pages[0]),
- GFP_KERNEL);
- if (!gntdev_dmabuf->pages)
- goto fail;
-
gntdev_dmabuf->nr_pages = count;
for (i = 0; i < count; i++)
@@ -576,7 +570,8 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
struct dma_buf *dma_buf;
struct dma_buf_attachment *attach;
struct sg_table *sgt;
- struct sg_page_iter sg_iter;
+ struct sg_dma_page_iter sg_iter;
+ unsigned long *gfns;
int i;
dma_buf = dma_buf_get(fd);
@@ -624,26 +619,31 @@ dmabuf_imp_to_refs(struct gntdev_dmabuf_priv *priv, struct device *dev,
gntdev_dmabuf->u.imp.sgt = sgt;
- /* Now convert sgt to array of pages and check for page validity. */
+ gfns = kcalloc(count, sizeof(*gfns), GFP_KERNEL);
+ if (!gfns) {
+ ret = ERR_PTR(-ENOMEM);
+ goto fail_unmap;
+ }
+
+ /*
+ * Now convert sgt to array of gfns without accessing underlying pages.
+ * It is not allowed to access the underlying struct page of an sg table
+ * exported by DMA-buf, but since we deal with special Xen dma device here
+ * (not a normal physical one) look at the dma addresses in the sg table
+ * and then calculate gfns directly from them.
+ */
i = 0;
- for_each_sgtable_page(sgt, &sg_iter, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
- /*
- * Check if page is valid: this can happen if we are given
- * a page from VRAM or other resources which are not backed
- * by a struct page.
- */
- if (!pfn_valid(page_to_pfn(page))) {
- ret = ERR_PTR(-EINVAL);
- goto fail_unmap;
- }
+ for_each_sgtable_dma_page(sgt, &sg_iter, 0) {
+ dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
+ unsigned long pfn = bfn_to_pfn(XEN_PFN_DOWN(dma_to_phys(dev, addr)));
- gntdev_dmabuf->pages[i++] = page;
+ gfns[i++] = pfn_to_gfn(pfn);
}
- ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gntdev_dmabuf->pages,
+ ret = ERR_PTR(dmabuf_imp_grant_foreign_access(gfns,
gntdev_dmabuf->u.imp.refs,
count, domid));
+ kfree(gfns);
if (IS_ERR(ret))
goto fail_end_access;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 35659bf707..04a6b470b1 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -427,16 +427,14 @@ EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref)
{
- u16 flags, nflags;
- u16 *pflags;
+ u16 *pflags = &gnttab_shared.v1[ref].flags;
+ u16 flags;
- pflags = &gnttab_shared.v1[ref].flags;
- nflags = *pflags;
+ flags = *pflags;
do {
- flags = nflags;
if (flags & (GTF_reading|GTF_writing))
return 0;
- } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
+ } while (!sync_try_cmpxchg(pflags, &flags, 0));
return 1;
}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index da88173bac..0eb337a8ec 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -29,15 +29,18 @@
#include <linux/seq_file.h>
#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
+#include <linux/virtio_mmio.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xen.h>
+#include <xen/events.h>
#include <xen/privcmd.h>
#include <xen/interface/xen.h>
#include <xen/interface/memory.h>
#include <xen/interface/hvm/dm_op.h>
+#include <xen/interface/hvm/ioreq.h>
#include <xen/features.h>
#include <xen/page.h>
#include <xen/xen-ops.h>
@@ -782,6 +785,7 @@ static long privcmd_ioctl_mmap_resource(struct file *file,
goto out;
pages = vma->vm_private_data;
+
for (i = 0; i < kdata.num; i++) {
xen_pfn_t pfn =
page_to_xen_pfn(pages[i / XEN_PFN_PER_PAGE]);
@@ -838,7 +842,7 @@ out:
return rc;
}
-#ifdef CONFIG_XEN_PRIVCMD_IRQFD
+#ifdef CONFIG_XEN_PRIVCMD_EVENTFD
/* Irqfd support */
static struct workqueue_struct *irqfd_cleanup_wq;
static DEFINE_MUTEX(irqfds_lock);
@@ -1079,6 +1083,389 @@ static void privcmd_irqfd_exit(void)
destroy_workqueue(irqfd_cleanup_wq);
}
+
+/* Ioeventfd Support */
+#define QUEUE_NOTIFY_VQ_MASK 0xFFFF
+
+static DEFINE_MUTEX(ioreq_lock);
+static LIST_HEAD(ioreq_list);
+
+/* per-eventfd structure */
+struct privcmd_kernel_ioeventfd {
+ struct eventfd_ctx *eventfd;
+ struct list_head list;
+ u64 addr;
+ unsigned int addr_len;
+ unsigned int vq;
+};
+
+/* per-guest CPU / port structure */
+struct ioreq_port {
+ int vcpu;
+ unsigned int port;
+ struct privcmd_kernel_ioreq *kioreq;
+};
+
+/* per-guest structure */
+struct privcmd_kernel_ioreq {
+ domid_t dom;
+ unsigned int vcpus;
+ u64 uioreq;
+ struct ioreq *ioreq;
+ spinlock_t lock; /* Protects ioeventfds list */
+ struct list_head ioeventfds;
+ struct list_head list;
+ struct ioreq_port ports[] __counted_by(vcpus);
+};
+
+static irqreturn_t ioeventfd_interrupt(int irq, void *dev_id)
+{
+ struct ioreq_port *port = dev_id;
+ struct privcmd_kernel_ioreq *kioreq = port->kioreq;
+ struct ioreq *ioreq = &kioreq->ioreq[port->vcpu];
+ struct privcmd_kernel_ioeventfd *kioeventfd;
+ unsigned int state = STATE_IOREQ_READY;
+
+ if (ioreq->state != STATE_IOREQ_READY ||
+ ioreq->type != IOREQ_TYPE_COPY || ioreq->dir != IOREQ_WRITE)
+ return IRQ_NONE;
+
+ /*
+ * We need a barrier, smp_mb(), here to ensure reads are finished before
+ * `state` is updated. Since the lock implementation ensures that
+ * appropriate barrier will be added anyway, we can avoid adding
+ * explicit barrier here.
+ *
+ * Ideally we don't need to update `state` within the locks, but we do
+ * that here to avoid adding explicit barrier.
+ */
+
+ spin_lock(&kioreq->lock);
+ ioreq->state = STATE_IOREQ_INPROCESS;
+
+ list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
+ if (ioreq->addr == kioeventfd->addr + VIRTIO_MMIO_QUEUE_NOTIFY &&
+ ioreq->size == kioeventfd->addr_len &&
+ (ioreq->data & QUEUE_NOTIFY_VQ_MASK) == kioeventfd->vq) {
+ eventfd_signal(kioeventfd->eventfd, 1);
+ state = STATE_IORESP_READY;
+ break;
+ }
+ }
+ spin_unlock(&kioreq->lock);
+
+ /*
+ * We need a barrier, smp_mb(), here to ensure writes are finished
+ * before `state` is updated. Since the lock implementation ensures that
+ * appropriate barrier will be added anyway, we can avoid adding
+ * explicit barrier here.
+ */
+
+ ioreq->state = state;
+
+ if (state == STATE_IORESP_READY) {
+ notify_remote_via_evtchn(port->port);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static void ioreq_free(struct privcmd_kernel_ioreq *kioreq)
+{
+ struct ioreq_port *ports = kioreq->ports;
+ int i;
+
+ lockdep_assert_held(&ioreq_lock);
+
+ list_del(&kioreq->list);
+
+ for (i = kioreq->vcpus - 1; i >= 0; i--)
+ unbind_from_irqhandler(irq_from_evtchn(ports[i].port), &ports[i]);
+
+ kfree(kioreq);
+}
+
+static
+struct privcmd_kernel_ioreq *alloc_ioreq(struct privcmd_ioeventfd *ioeventfd)
+{
+ struct privcmd_kernel_ioreq *kioreq;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct page **pages;
+ unsigned int *ports;
+ int ret, size, i;
+
+ lockdep_assert_held(&ioreq_lock);
+
+ size = struct_size(kioreq, ports, ioeventfd->vcpus);
+ kioreq = kzalloc(size, GFP_KERNEL);
+ if (!kioreq)
+ return ERR_PTR(-ENOMEM);
+
+ kioreq->dom = ioeventfd->dom;
+ kioreq->vcpus = ioeventfd->vcpus;
+ kioreq->uioreq = ioeventfd->ioreq;
+ spin_lock_init(&kioreq->lock);
+ INIT_LIST_HEAD(&kioreq->ioeventfds);
+
+ /* The memory for ioreq server must have been mapped earlier */
+ mmap_write_lock(mm);
+ vma = find_vma(mm, (unsigned long)ioeventfd->ioreq);
+ if (!vma) {
+ pr_err("Failed to find vma for ioreq page!\n");
+ mmap_write_unlock(mm);
+ ret = -EFAULT;
+ goto error_kfree;
+ }
+
+ pages = vma->vm_private_data;
+ kioreq->ioreq = (struct ioreq *)(page_to_virt(pages[0]));
+ mmap_write_unlock(mm);
+
+ size = sizeof(*ports) * kioreq->vcpus;
+ ports = kzalloc(size, GFP_KERNEL);
+ if (!ports) {
+ ret = -ENOMEM;
+ goto error_kfree;
+ }
+
+ if (copy_from_user(ports, u64_to_user_ptr(ioeventfd->ports), size)) {
+ ret = -EFAULT;
+ goto error_kfree_ports;
+ }
+
+ for (i = 0; i < kioreq->vcpus; i++) {
+ kioreq->ports[i].vcpu = i;
+ kioreq->ports[i].port = ports[i];
+ kioreq->ports[i].kioreq = kioreq;
+
+ ret = bind_evtchn_to_irqhandler_lateeoi(ports[i],
+ ioeventfd_interrupt, IRQF_SHARED, "ioeventfd",
+ &kioreq->ports[i]);
+ if (ret < 0)
+ goto error_unbind;
+ }
+
+ kfree(ports);
+
+ list_add_tail(&kioreq->list, &ioreq_list);
+
+ return kioreq;
+
+error_unbind:
+ while (--i >= 0)
+ unbind_from_irqhandler(irq_from_evtchn(ports[i]), &kioreq->ports[i]);
+error_kfree_ports:
+ kfree(ports);
+error_kfree:
+ kfree(kioreq);
+ return ERR_PTR(ret);
+}
+
+static struct privcmd_kernel_ioreq *
+get_ioreq(struct privcmd_ioeventfd *ioeventfd, struct eventfd_ctx *eventfd)
+{
+ struct privcmd_kernel_ioreq *kioreq;
+ unsigned long flags;
+
+ list_for_each_entry(kioreq, &ioreq_list, list) {
+ struct privcmd_kernel_ioeventfd *kioeventfd;
+
+ /*
+ * kioreq fields can be accessed here without a lock as they are
+ * never updated after being added to the ioreq_list.
+ */
+ if (kioreq->uioreq != ioeventfd->ioreq) {
+ continue;
+ } else if (kioreq->dom != ioeventfd->dom ||
+ kioreq->vcpus != ioeventfd->vcpus) {
+ pr_err("Invalid ioeventfd configuration mismatch, dom (%u vs %u), vcpus (%u vs %u)\n",
+ kioreq->dom, ioeventfd->dom, kioreq->vcpus,
+ ioeventfd->vcpus);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Look for a duplicate eventfd for the same guest */
+ spin_lock_irqsave(&kioreq->lock, flags);
+ list_for_each_entry(kioeventfd, &kioreq->ioeventfds, list) {
+ if (eventfd == kioeventfd->eventfd) {
+ spin_unlock_irqrestore(&kioreq->lock, flags);
+ return ERR_PTR(-EBUSY);
+ }
+ }
+ spin_unlock_irqrestore(&kioreq->lock, flags);
+
+ return kioreq;
+ }
+
+ /* Matching kioreq isn't found, allocate a new one */
+ return alloc_ioreq(ioeventfd);
+}
+
+static void ioeventfd_free(struct privcmd_kernel_ioeventfd *kioeventfd)
+{
+ list_del(&kioeventfd->list);
+ eventfd_ctx_put(kioeventfd->eventfd);
+ kfree(kioeventfd);
+}
+
+static int privcmd_ioeventfd_assign(struct privcmd_ioeventfd *ioeventfd)
+{
+ struct privcmd_kernel_ioeventfd *kioeventfd;
+ struct privcmd_kernel_ioreq *kioreq;
+ unsigned long flags;
+ struct fd f;
+ int ret;
+
+ /* Check for range overflow */
+ if (ioeventfd->addr + ioeventfd->addr_len < ioeventfd->addr)
+ return -EINVAL;
+
+ /* Vhost requires us to support length 1, 2, 4, and 8 */
+ if (!(ioeventfd->addr_len == 1 || ioeventfd->addr_len == 2 ||
+ ioeventfd->addr_len == 4 || ioeventfd->addr_len == 8))
+ return -EINVAL;
+
+ /* 4096 vcpus limit enough ? */
+ if (!ioeventfd->vcpus || ioeventfd->vcpus > 4096)
+ return -EINVAL;
+
+ kioeventfd = kzalloc(sizeof(*kioeventfd), GFP_KERNEL);
+ if (!kioeventfd)
+ return -ENOMEM;
+
+ f = fdget(ioeventfd->event_fd);
+ if (!f.file) {
+ ret = -EBADF;
+ goto error_kfree;
+ }
+
+ kioeventfd->eventfd = eventfd_ctx_fileget(f.file);
+ fdput(f);
+
+ if (IS_ERR(kioeventfd->eventfd)) {
+ ret = PTR_ERR(kioeventfd->eventfd);
+ goto error_kfree;
+ }
+
+ kioeventfd->addr = ioeventfd->addr;
+ kioeventfd->addr_len = ioeventfd->addr_len;
+ kioeventfd->vq = ioeventfd->vq;
+
+ mutex_lock(&ioreq_lock);
+ kioreq = get_ioreq(ioeventfd, kioeventfd->eventfd);
+ if (IS_ERR(kioreq)) {
+ mutex_unlock(&ioreq_lock);
+ ret = PTR_ERR(kioreq);
+ goto error_eventfd;
+ }
+
+ spin_lock_irqsave(&kioreq->lock, flags);
+ list_add_tail(&kioeventfd->list, &kioreq->ioeventfds);
+ spin_unlock_irqrestore(&kioreq->lock, flags);
+
+ mutex_unlock(&ioreq_lock);
+
+ return 0;
+
+error_eventfd:
+ eventfd_ctx_put(kioeventfd->eventfd);
+
+error_kfree:
+ kfree(kioeventfd);
+ return ret;
+}
+
+static int privcmd_ioeventfd_deassign(struct privcmd_ioeventfd *ioeventfd)
+{
+ struct privcmd_kernel_ioreq *kioreq, *tkioreq;
+ struct eventfd_ctx *eventfd;
+ unsigned long flags;
+ int ret = 0;
+
+ eventfd = eventfd_ctx_fdget(ioeventfd->event_fd);
+ if (IS_ERR(eventfd))
+ return PTR_ERR(eventfd);
+
+ mutex_lock(&ioreq_lock);
+ list_for_each_entry_safe(kioreq, tkioreq, &ioreq_list, list) {
+ struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
+ /*
+ * kioreq fields can be accessed here without a lock as they are
+ * never updated after being added to the ioreq_list.
+ */
+ if (kioreq->dom != ioeventfd->dom ||
+ kioreq->uioreq != ioeventfd->ioreq ||
+ kioreq->vcpus != ioeventfd->vcpus)
+ continue;
+
+ spin_lock_irqsave(&kioreq->lock, flags);
+ list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list) {
+ if (eventfd == kioeventfd->eventfd) {
+ ioeventfd_free(kioeventfd);
+ spin_unlock_irqrestore(&kioreq->lock, flags);
+
+ if (list_empty(&kioreq->ioeventfds))
+ ioreq_free(kioreq);
+ goto unlock;
+ }
+ }
+ spin_unlock_irqrestore(&kioreq->lock, flags);
+ break;
+ }
+
+ pr_err("Ioeventfd isn't already assigned, dom: %u, addr: %llu\n",
+ ioeventfd->dom, ioeventfd->addr);
+ ret = -ENODEV;
+
+unlock:
+ mutex_unlock(&ioreq_lock);
+ eventfd_ctx_put(eventfd);
+
+ return ret;
+}
+
+static long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
+{
+ struct privcmd_data *data = file->private_data;
+ struct privcmd_ioeventfd ioeventfd;
+
+ if (copy_from_user(&ioeventfd, udata, sizeof(ioeventfd)))
+ return -EFAULT;
+
+ /* No other flags should be set */
+ if (ioeventfd.flags & ~PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
+ return -EINVAL;
+
+ /* If restriction is in place, check the domid matches */
+ if (data->domid != DOMID_INVALID && data->domid != ioeventfd.dom)
+ return -EPERM;
+
+ if (ioeventfd.flags & PRIVCMD_IOEVENTFD_FLAG_DEASSIGN)
+ return privcmd_ioeventfd_deassign(&ioeventfd);
+
+ return privcmd_ioeventfd_assign(&ioeventfd);
+}
+
+static void privcmd_ioeventfd_exit(void)
+{
+ struct privcmd_kernel_ioreq *kioreq, *tmp;
+ unsigned long flags;
+
+ mutex_lock(&ioreq_lock);
+ list_for_each_entry_safe(kioreq, tmp, &ioreq_list, list) {
+ struct privcmd_kernel_ioeventfd *kioeventfd, *tmp;
+
+ spin_lock_irqsave(&kioreq->lock, flags);
+ list_for_each_entry_safe(kioeventfd, tmp, &kioreq->ioeventfds, list)
+ ioeventfd_free(kioeventfd);
+ spin_unlock_irqrestore(&kioreq->lock, flags);
+
+ ioreq_free(kioreq);
+ }
+ mutex_unlock(&ioreq_lock);
+}
#else
static inline long privcmd_ioctl_irqfd(struct file *file, void __user *udata)
{
@@ -1093,7 +1480,16 @@ static inline int privcmd_irqfd_init(void)
static inline void privcmd_irqfd_exit(void)
{
}
-#endif /* CONFIG_XEN_PRIVCMD_IRQFD */
+
+static inline long privcmd_ioctl_ioeventfd(struct file *file, void __user *udata)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void privcmd_ioeventfd_exit(void)
+{
+}
+#endif /* CONFIG_XEN_PRIVCMD_EVENTFD */
static long privcmd_ioctl(struct file *file,
unsigned int cmd, unsigned long data)
@@ -1134,6 +1530,10 @@ static long privcmd_ioctl(struct file *file,
ret = privcmd_ioctl_irqfd(file, udata);
break;
+ case IOCTL_PRIVCMD_IOEVENTFD:
+ ret = privcmd_ioctl_ioeventfd(file, udata);
+ break;
+
default:
break;
}
@@ -1278,6 +1678,7 @@ err_privcmdbuf:
static void __exit privcmd_exit(void)
{
+ privcmd_ioeventfd_exit();
privcmd_irqfd_exit();
misc_deregister(&privcmd_dev);
misc_deregister(&xen_privcmdbuf_dev);
diff --git a/drivers/xen/xen-front-pgdir-shbuf.c b/drivers/xen/xen-front-pgdir-shbuf.c
index b52e0fa595..223870a011 100644
--- a/drivers/xen/xen-front-pgdir-shbuf.c
+++ b/drivers/xen/xen-front-pgdir-shbuf.c
@@ -21,7 +21,7 @@
#include <xen/xen-front-pgdir-shbuf.h>
-/**
+/*
* This structure represents the structure of a shared page
* that contains grant references to the pages of the shared
* buffer. This structure is common to many Xen para-virtualized
@@ -33,7 +33,7 @@ struct xen_page_directory {
grant_ref_t gref[]; /* Variable length */
};
-/**
+/*
* Shared buffer ops which are differently implemented
* depending on the allocation mode, e.g. if the buffer
* is allocated by the corresponding backend or frontend.
@@ -61,7 +61,7 @@ struct xen_front_pgdir_shbuf_ops {
int (*unmap)(struct xen_front_pgdir_shbuf *buf);
};
-/**
+/*
* Get granted reference to the very first page of the
* page directory. Usually this is passed to the backend,
* so it can find/fill the grant references to the buffer's
@@ -81,7 +81,7 @@ xen_front_pgdir_shbuf_get_dir_start(struct xen_front_pgdir_shbuf *buf)
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_get_dir_start);
-/**
+/*
* Map granted references of the shared buffer.
*
* Depending on the shared buffer mode of allocation
@@ -102,7 +102,7 @@ int xen_front_pgdir_shbuf_map(struct xen_front_pgdir_shbuf *buf)
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_map);
-/**
+/*
* Unmap granted references of the shared buffer.
*
* Depending on the shared buffer mode of allocation
@@ -123,7 +123,7 @@ int xen_front_pgdir_shbuf_unmap(struct xen_front_pgdir_shbuf *buf)
}
EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_unmap);
-/**
+/*
* Free all the resources of the shared buffer.
*
* \param buf shared buffer which resources to be freed.
@@ -150,7 +150,7 @@ EXPORT_SYMBOL_GPL(xen_front_pgdir_shbuf_free);
offsetof(struct xen_page_directory, \
gref)) / sizeof(grant_ref_t))
-/**
+/*
* Get the number of pages the page directory consumes itself.
*
* \param buf shared buffer.
@@ -160,7 +160,7 @@ static int get_num_pages_dir(struct xen_front_pgdir_shbuf *buf)
return DIV_ROUND_UP(buf->num_pages, XEN_NUM_GREFS_PER_PAGE);
}
-/**
+/*
* Calculate the number of grant references needed to share the buffer
* and its pages when backend allocates the buffer.
*
@@ -172,7 +172,7 @@ static void backend_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
buf->num_grefs = get_num_pages_dir(buf);
}
-/**
+/*
* Calculate the number of grant references needed to share the buffer
* and its pages when frontend allocates the buffer.
*
@@ -190,7 +190,7 @@ static void guest_calc_num_grefs(struct xen_front_pgdir_shbuf *buf)
#define xen_page_to_vaddr(page) \
((uintptr_t)pfn_to_kaddr(page_to_xen_pfn(page)))
-/**
+/*
* Unmap the buffer previously mapped with grant references
* provided by the backend.
*
@@ -238,7 +238,7 @@ static int backend_unmap(struct xen_front_pgdir_shbuf *buf)
return ret;
}
-/**
+/*
* Map the buffer with grant references provided by the backend.
*
* \param buf shared buffer.
@@ -320,7 +320,7 @@ static int backend_map(struct xen_front_pgdir_shbuf *buf)
return ret;
}
-/**
+/*
* Fill page directory with grant references to the pages of the
* page directory itself.
*
@@ -350,7 +350,7 @@ static void backend_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
page_dir->gref_dir_next_page = XEN_GREF_LIST_END;
}
-/**
+/*
* Fill page directory with grant references to the pages of the
* page directory and the buffer we share with the backend.
*
@@ -389,7 +389,7 @@ static void guest_fill_page_dir(struct xen_front_pgdir_shbuf *buf)
}
}
-/**
+/*
* Grant references to the frontend's buffer pages.
*
* These will be shared with the backend, so it can
@@ -418,7 +418,7 @@ static int guest_grant_refs_for_buffer(struct xen_front_pgdir_shbuf *buf,
return 0;
}
-/**
+/*
* Grant all the references needed to share the buffer.
*
* Grant references to the page directory pages and, if
@@ -466,7 +466,7 @@ static int grant_references(struct xen_front_pgdir_shbuf *buf)
return 0;
}
-/**
+/*
* Allocate all required structures to mange shared buffer.
*
* \param buf shared buffer.
@@ -506,7 +506,7 @@ static const struct xen_front_pgdir_shbuf_ops local_ops = {
.grant_refs_for_buffer = guest_grant_refs_for_buffer,
};
-/**
+/*
* Allocate a new instance of a shared buffer.
*
* \param cfg configuration to be used while allocating a new shared buffer.
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 8b77e4c06e..0c51edfd13 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1832,6 +1832,9 @@ static const struct target_core_fabric_ops scsiback_ops = {
.tfc_wwn_attrs = scsiback_wwn_attrs,
.tfc_tpg_base_attrs = scsiback_tpg_attrs,
.tfc_tpg_param_attrs = scsiback_param_attrs,
+
+ .default_submit_type = TARGET_DIRECT_SUBMIT,
+ .direct_submit_supp = 1,
};
static const struct xenbus_device_id scsiback_ids[] = {
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 0792fda49a..6f56640092 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -82,7 +82,7 @@ struct read_buffer {
struct list_head list;
unsigned int cons;
unsigned int len;
- char msg[];
+ char msg[] __counted_by(len);
};
struct xenbus_file_priv {
@@ -195,7 +195,7 @@ static int queue_reply(struct list_head *queue, const void *data, size_t len)
if (len > XENSTORE_PAYLOAD_MAX)
return -EINVAL;
- rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
+ rb = kmalloc(struct_size(rb, msg, len), GFP_KERNEL);
if (rb == NULL)
return -ENOMEM;
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index da96c260e2..5ebb723307 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -284,13 +284,9 @@ static unsigned long backend_shrink_memory_count(struct shrinker *shrinker,
return 0;
}
-static struct shrinker backend_memory_shrinker = {
- .count_objects = backend_shrink_memory_count,
- .seeks = DEFAULT_SEEKS,
-};
-
static int __init xenbus_probe_backend_init(void)
{
+ struct shrinker *backend_memory_shrinker;
static struct notifier_block xenstore_notifier = {
.notifier_call = backend_probe_and_watch
};
@@ -305,8 +301,15 @@ static int __init xenbus_probe_backend_init(void)
register_xenstore_notifier(&xenstore_notifier);
- if (register_shrinker(&backend_memory_shrinker, "xen-backend"))
- pr_warn("shrinker registration failed\n");
+ backend_memory_shrinker = shrinker_alloc(0, "xen-backend");
+ if (!backend_memory_shrinker) {
+ pr_warn("shrinker allocation failed\n");
+ return 0;
+ }
+
+ backend_memory_shrinker->count_objects = backend_shrink_memory_count;
+
+ shrinker_register(backend_memory_shrinker);
return 0;
}