author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000
commit    | ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      | b2d64bc10158fdd5497876388cd68142ca374ed3 /tools/testing/selftests/kvm/lib
parent    | Initial commit. (diff)
download  | linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
          | linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'tools/testing/selftests/kvm/lib')
34 files changed, 10986 insertions, 0 deletions
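For orientation before the hunks themselves: this commit adds a guest-side GIC driver (gic.c, gic_v3.c), exception vectors and CPU setup (handlers.S, processor.c), and host-side vGIC-v3 helpers (vgic.c). The sketch below shows how a selftest would typically wire these pieces together; it is illustrative only and not part of the commit. The GPA constants, the single-vCPU setup, nr_irqs=64, and the choice of PPI 27 are assumptions made for the example.

```c
/* Illustrative sketch only: GPAs, nr_irqs and the INTID are assumed values. */
#include "kvm_util.h"
#include "gic.h"
#include "vgic.h"

#define GICD_BASE_GPA	0x08000000ULL	/* assumed distributor GPA */
#define GICR_BASE_GPA	0x080A0000ULL	/* assumed redistributor GPA */

static void guest_code(void)
{
	/* Bring up the distributor and this vCPU's redistributor. */
	gic_init(GIC_V3, 1, (void *)GICD_BASE_GPA, (void *)GICR_BASE_GPA);

	/* Unmask all priorities and enable an arbitrary private interrupt. */
	gic_set_priority_mask(0xff);
	gic_irq_enable(27);

	/* ... trigger and handle interrupts, then report back ... */
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	int gic_fd;

	/* Host side: the vGIC-v3 device must be created after all vCPUs. */
	gic_fd = vgic_v3_setup(vm, 1, 64, GICD_BASE_GPA, GICR_BASE_GPA);
	TEST_ASSERT(gic_fd >= 0, "Failed to create vGIC-v3 device");

	vcpu_run(vcpu);

	close(gic_fd);
	kvm_vm_free(vm);
	return 0;
}
```

A complete test would also install IRQ handlers via vm_init_descriptor_tables()/vcpu_init_descriptor_tables() and acknowledge interrupts in the guest with gic_get_and_ack_irq()/gic_set_eoi(), all of which are introduced by the hunks below.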
diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic.c b/tools/testing/selftests/kvm/lib/aarch64/gic.c
new file mode 100644
index 0000000000..55668631d5
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/aarch64/gic.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * ARM Generic Interrupt Controller (GIC) support
+ */
+
+#include <errno.h>
+#include <linux/bits.h>
+#include <linux/sizes.h>
+
+#include "kvm_util.h"
+
+#include <gic.h>
+#include "gic_private.h"
+#include "processor.h"
+#include "spinlock.h"
+
+static const struct gic_common_ops *gic_common_ops;
+static struct spinlock gic_lock;
+
+static void gic_cpu_init(unsigned int cpu, void *redist_base)
+{
+	gic_common_ops->gic_cpu_init(cpu, redist_base);
+}
+
+static void
+gic_dist_init(enum gic_type type, unsigned int nr_cpus, void *dist_base)
+{
+	const struct gic_common_ops *gic_ops = NULL;
+
+	spin_lock(&gic_lock);
+
+	/* Distributor initialization is needed only once per VM */
+	if (gic_common_ops) {
+		spin_unlock(&gic_lock);
+		return;
+	}
+
+	if (type == GIC_V3)
+		gic_ops = &gicv3_ops;
+
+	GUEST_ASSERT(gic_ops);
+
+	gic_ops->gic_init(nr_cpus, dist_base);
+	gic_common_ops = gic_ops;
+
+	/* Make sure that the initialized data is visible to all the vCPUs */
+	dsb(sy);
+
+	spin_unlock(&gic_lock);
+}
+
+void gic_init(enum gic_type type, unsigned int nr_cpus,
+		void *dist_base, void *redist_base)
+{
+	uint32_t cpu = guest_get_vcpuid();
+
+	GUEST_ASSERT(type < GIC_TYPE_MAX);
+	GUEST_ASSERT(dist_base);
+	GUEST_ASSERT(redist_base);
+	GUEST_ASSERT(nr_cpus);
+
+	gic_dist_init(type, nr_cpus, dist_base);
+	gic_cpu_init(cpu, redist_base);
+}
+
+void gic_irq_enable(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_enable(intid);
+}
+
+void gic_irq_disable(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_disable(intid);
+}
+
+unsigned int gic_get_and_ack_irq(void)
+{
+	uint64_t irqstat;
+	unsigned int intid;
+
+	GUEST_ASSERT(gic_common_ops);
+
+	irqstat = gic_common_ops->gic_read_iar();
+	intid = irqstat & GENMASK(23, 0);
+
+	return intid;
+}
+
+void gic_set_eoi(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_write_eoir(intid);
+}
+
+void gic_set_dir(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_write_dir(intid);
+}
+
+void gic_set_eoi_split(bool split)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_set_eoi_split(split);
+}
+
+void gic_set_priority_mask(uint64_t pmr)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_set_priority_mask(pmr);
+}
+
+void gic_set_priority(unsigned int intid, unsigned int prio)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_set_priority(intid, prio);
+}
+
+void gic_irq_set_active(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_set_active(intid);
+}
+
+void gic_irq_clear_active(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_clear_active(intid);
+}
+
+bool gic_irq_get_active(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	return gic_common_ops->gic_irq_get_active(intid);
+}
+
+void gic_irq_set_pending(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_set_pending(intid);
+}
+
+void gic_irq_clear_pending(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	gic_common_ops->gic_irq_clear_pending(intid);
+}
+
+bool gic_irq_get_pending(unsigned int intid)
+{
+	GUEST_ASSERT(gic_common_ops);
+	return gic_common_ops->gic_irq_get_pending(intid);
+}
+
+void
gic_irq_set_config(unsigned int intid, bool is_edge) +{ + GUEST_ASSERT(gic_common_ops); + gic_common_ops->gic_irq_set_config(intid, is_edge); +} diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_private.h b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h new file mode 100644 index 0000000000..75d07313c8 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_private.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * ARM Generic Interrupt Controller (GIC) private defines that's only + * shared among the GIC library code. + */ + +#ifndef SELFTEST_KVM_GIC_PRIVATE_H +#define SELFTEST_KVM_GIC_PRIVATE_H + +struct gic_common_ops { + void (*gic_init)(unsigned int nr_cpus, void *dist_base); + void (*gic_cpu_init)(unsigned int cpu, void *redist_base); + void (*gic_irq_enable)(unsigned int intid); + void (*gic_irq_disable)(unsigned int intid); + uint64_t (*gic_read_iar)(void); + void (*gic_write_eoir)(uint32_t irq); + void (*gic_write_dir)(uint32_t irq); + void (*gic_set_eoi_split)(bool split); + void (*gic_set_priority_mask)(uint64_t mask); + void (*gic_set_priority)(uint32_t intid, uint32_t prio); + void (*gic_irq_set_active)(uint32_t intid); + void (*gic_irq_clear_active)(uint32_t intid); + bool (*gic_irq_get_active)(uint32_t intid); + void (*gic_irq_set_pending)(uint32_t intid); + void (*gic_irq_clear_pending)(uint32_t intid); + bool (*gic_irq_get_pending)(uint32_t intid); + void (*gic_irq_set_config)(uint32_t intid, bool is_edge); +}; + +extern const struct gic_common_ops gicv3_ops; + +#endif /* SELFTEST_KVM_GIC_PRIVATE_H */ diff --git a/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c new file mode 100644 index 0000000000..263bf3ed8f --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/gic_v3.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM Generic Interrupt Controller (GIC) v3 support + */ + +#include <linux/sizes.h> + +#include "kvm_util.h" +#include "processor.h" +#include "delay.h" + +#include "gic_v3.h" +#include "gic_private.h" + +struct gicv3_data { + void *dist_base; + void *redist_base[GICV3_MAX_CPUS]; + unsigned int nr_cpus; + unsigned int nr_spis; +}; + +#define sgi_base_from_redist(redist_base) (redist_base + SZ_64K) +#define DIST_BIT (1U << 31) + +enum gicv3_intid_range { + SGI_RANGE, + PPI_RANGE, + SPI_RANGE, + INVALID_RANGE, +}; + +static struct gicv3_data gicv3_data; + +static void gicv3_gicd_wait_for_rwp(void) +{ + unsigned int count = 100000; /* 1s */ + + while (readl(gicv3_data.dist_base + GICD_CTLR) & GICD_CTLR_RWP) { + GUEST_ASSERT(count--); + udelay(10); + } +} + +static void gicv3_gicr_wait_for_rwp(void *redist_base) +{ + unsigned int count = 100000; /* 1s */ + + while (readl(redist_base + GICR_CTLR) & GICR_CTLR_RWP) { + GUEST_ASSERT(count--); + udelay(10); + } +} + +static void gicv3_wait_for_rwp(uint32_t cpu_or_dist) +{ + if (cpu_or_dist & DIST_BIT) + gicv3_gicd_wait_for_rwp(); + else + gicv3_gicr_wait_for_rwp(gicv3_data.redist_base[cpu_or_dist]); +} + +static enum gicv3_intid_range get_intid_range(unsigned int intid) +{ + switch (intid) { + case 0 ... 15: + return SGI_RANGE; + case 16 ... 31: + return PPI_RANGE; + case 32 ... 
1019: + return SPI_RANGE; + } + + /* We should not be reaching here */ + GUEST_ASSERT(0); + + return INVALID_RANGE; +} + +static uint64_t gicv3_read_iar(void) +{ + uint64_t irqstat = read_sysreg_s(SYS_ICC_IAR1_EL1); + + dsb(sy); + return irqstat; +} + +static void gicv3_write_eoir(uint32_t irq) +{ + write_sysreg_s(irq, SYS_ICC_EOIR1_EL1); + isb(); +} + +static void gicv3_write_dir(uint32_t irq) +{ + write_sysreg_s(irq, SYS_ICC_DIR_EL1); + isb(); +} + +static void gicv3_set_priority_mask(uint64_t mask) +{ + write_sysreg_s(mask, SYS_ICC_PMR_EL1); +} + +static void gicv3_set_eoi_split(bool split) +{ + uint32_t val; + + /* + * All other fields are read-only, so no need to read CTLR first. In + * fact, the kernel does the same. + */ + val = split ? (1U << 1) : 0; + write_sysreg_s(val, SYS_ICC_CTLR_EL1); + isb(); +} + +uint32_t gicv3_reg_readl(uint32_t cpu_or_dist, uint64_t offset) +{ + void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base + : sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]); + return readl(base + offset); +} + +void gicv3_reg_writel(uint32_t cpu_or_dist, uint64_t offset, uint32_t reg_val) +{ + void *base = cpu_or_dist & DIST_BIT ? gicv3_data.dist_base + : sgi_base_from_redist(gicv3_data.redist_base[cpu_or_dist]); + writel(reg_val, base + offset); +} + +uint32_t gicv3_getl_fields(uint32_t cpu_or_dist, uint64_t offset, uint32_t mask) +{ + return gicv3_reg_readl(cpu_or_dist, offset) & mask; +} + +void gicv3_setl_fields(uint32_t cpu_or_dist, uint64_t offset, + uint32_t mask, uint32_t reg_val) +{ + uint32_t tmp = gicv3_reg_readl(cpu_or_dist, offset) & ~mask; + + tmp |= (reg_val & mask); + gicv3_reg_writel(cpu_or_dist, offset, tmp); +} + +/* + * We use a single offset for the distributor and redistributor maps as they + * have the same value in both. The only exceptions are registers that only + * exist in one and not the other, like GICR_WAKER that doesn't exist in the + * distributor map. Such registers are conveniently marked as reserved in the + * map that doesn't implement it; like GICR_WAKER's offset of 0x0014 being + * marked as "Reserved" in the Distributor map. + */ +static void gicv3_access_reg(uint32_t intid, uint64_t offset, + uint32_t reg_bits, uint32_t bits_per_field, + bool write, uint32_t *val) +{ + uint32_t cpu = guest_get_vcpuid(); + enum gicv3_intid_range intid_range = get_intid_range(intid); + uint32_t fields_per_reg, index, mask, shift; + uint32_t cpu_or_dist; + + GUEST_ASSERT(bits_per_field <= reg_bits); + GUEST_ASSERT(!write || *val < (1U << bits_per_field)); + /* + * This function does not support 64 bit accesses. Just asserting here + * until we implement readq/writeq. + */ + GUEST_ASSERT(reg_bits == 32); + + fields_per_reg = reg_bits / bits_per_field; + index = intid % fields_per_reg; + shift = index * bits_per_field; + mask = ((1U << bits_per_field) - 1) << shift; + + /* Set offset to the actual register holding intid's config. */ + offset += (intid / fields_per_reg) * (reg_bits / 8); + + cpu_or_dist = (intid_range == SPI_RANGE) ? 
DIST_BIT : cpu; + + if (write) + gicv3_setl_fields(cpu_or_dist, offset, mask, *val << shift); + *val = gicv3_getl_fields(cpu_or_dist, offset, mask) >> shift; +} + +static void gicv3_write_reg(uint32_t intid, uint64_t offset, + uint32_t reg_bits, uint32_t bits_per_field, uint32_t val) +{ + gicv3_access_reg(intid, offset, reg_bits, + bits_per_field, true, &val); +} + +static uint32_t gicv3_read_reg(uint32_t intid, uint64_t offset, + uint32_t reg_bits, uint32_t bits_per_field) +{ + uint32_t val; + + gicv3_access_reg(intid, offset, reg_bits, + bits_per_field, false, &val); + return val; +} + +static void gicv3_set_priority(uint32_t intid, uint32_t prio) +{ + gicv3_write_reg(intid, GICD_IPRIORITYR, 32, 8, prio); +} + +/* Sets the intid to be level-sensitive or edge-triggered. */ +static void gicv3_irq_set_config(uint32_t intid, bool is_edge) +{ + uint32_t val; + + /* N/A for private interrupts. */ + GUEST_ASSERT(get_intid_range(intid) == SPI_RANGE); + val = is_edge ? 2 : 0; + gicv3_write_reg(intid, GICD_ICFGR, 32, 2, val); +} + +static void gicv3_irq_enable(uint32_t intid) +{ + bool is_spi = get_intid_range(intid) == SPI_RANGE; + uint32_t cpu = guest_get_vcpuid(); + + gicv3_write_reg(intid, GICD_ISENABLER, 32, 1, 1); + gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu); +} + +static void gicv3_irq_disable(uint32_t intid) +{ + bool is_spi = get_intid_range(intid) == SPI_RANGE; + uint32_t cpu = guest_get_vcpuid(); + + gicv3_write_reg(intid, GICD_ICENABLER, 32, 1, 1); + gicv3_wait_for_rwp(is_spi ? DIST_BIT : cpu); +} + +static void gicv3_irq_set_active(uint32_t intid) +{ + gicv3_write_reg(intid, GICD_ISACTIVER, 32, 1, 1); +} + +static void gicv3_irq_clear_active(uint32_t intid) +{ + gicv3_write_reg(intid, GICD_ICACTIVER, 32, 1, 1); +} + +static bool gicv3_irq_get_active(uint32_t intid) +{ + return gicv3_read_reg(intid, GICD_ISACTIVER, 32, 1); +} + +static void gicv3_irq_set_pending(uint32_t intid) +{ + gicv3_write_reg(intid, GICD_ISPENDR, 32, 1, 1); +} + +static void gicv3_irq_clear_pending(uint32_t intid) +{ + gicv3_write_reg(intid, GICD_ICPENDR, 32, 1, 1); +} + +static bool gicv3_irq_get_pending(uint32_t intid) +{ + return gicv3_read_reg(intid, GICD_ISPENDR, 32, 1); +} + +static void gicv3_enable_redist(void *redist_base) +{ + uint32_t val = readl(redist_base + GICR_WAKER); + unsigned int count = 100000; /* 1s */ + + val &= ~GICR_WAKER_ProcessorSleep; + writel(val, redist_base + GICR_WAKER); + + /* Wait until the processor is 'active' */ + while (readl(redist_base + GICR_WAKER) & GICR_WAKER_ChildrenAsleep) { + GUEST_ASSERT(count--); + udelay(10); + } +} + +static inline void *gicr_base_cpu(void *redist_base, uint32_t cpu) +{ + /* Align all the redistributors sequentially */ + return redist_base + cpu * SZ_64K * 2; +} + +static void gicv3_cpu_init(unsigned int cpu, void *redist_base) +{ + void *sgi_base; + unsigned int i; + void *redist_base_cpu; + + GUEST_ASSERT(cpu < gicv3_data.nr_cpus); + + redist_base_cpu = gicr_base_cpu(redist_base, cpu); + sgi_base = sgi_base_from_redist(redist_base_cpu); + + gicv3_enable_redist(redist_base_cpu); + + /* + * Mark all the SGI and PPI interrupts as non-secure Group-1. + * Also, deactivate and disable them. 
+ */ + writel(~0, sgi_base + GICR_IGROUPR0); + writel(~0, sgi_base + GICR_ICACTIVER0); + writel(~0, sgi_base + GICR_ICENABLER0); + + /* Set a default priority for all the SGIs and PPIs */ + for (i = 0; i < 32; i += 4) + writel(GICD_INT_DEF_PRI_X4, + sgi_base + GICR_IPRIORITYR0 + i); + + gicv3_gicr_wait_for_rwp(redist_base_cpu); + + /* Enable the GIC system register (ICC_*) access */ + write_sysreg_s(read_sysreg_s(SYS_ICC_SRE_EL1) | ICC_SRE_EL1_SRE, + SYS_ICC_SRE_EL1); + + /* Set a default priority threshold */ + write_sysreg_s(ICC_PMR_DEF_PRIO, SYS_ICC_PMR_EL1); + + /* Enable non-secure Group-1 interrupts */ + write_sysreg_s(ICC_IGRPEN1_EL1_ENABLE, SYS_ICC_GRPEN1_EL1); + + gicv3_data.redist_base[cpu] = redist_base_cpu; +} + +static void gicv3_dist_init(void) +{ + void *dist_base = gicv3_data.dist_base; + unsigned int i; + + /* Disable the distributor until we set things up */ + writel(0, dist_base + GICD_CTLR); + gicv3_gicd_wait_for_rwp(); + + /* + * Mark all the SPI interrupts as non-secure Group-1. + * Also, deactivate and disable them. + */ + for (i = 32; i < gicv3_data.nr_spis; i += 32) { + writel(~0, dist_base + GICD_IGROUPR + i / 8); + writel(~0, dist_base + GICD_ICACTIVER + i / 8); + writel(~0, dist_base + GICD_ICENABLER + i / 8); + } + + /* Set a default priority for all the SPIs */ + for (i = 32; i < gicv3_data.nr_spis; i += 4) + writel(GICD_INT_DEF_PRI_X4, + dist_base + GICD_IPRIORITYR + i); + + /* Wait for the settings to sync-in */ + gicv3_gicd_wait_for_rwp(); + + /* Finally, enable the distributor globally with ARE */ + writel(GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | + GICD_CTLR_ENABLE_G1, dist_base + GICD_CTLR); + gicv3_gicd_wait_for_rwp(); +} + +static void gicv3_init(unsigned int nr_cpus, void *dist_base) +{ + GUEST_ASSERT(nr_cpus <= GICV3_MAX_CPUS); + + gicv3_data.nr_cpus = nr_cpus; + gicv3_data.dist_base = dist_base; + gicv3_data.nr_spis = GICD_TYPER_SPIS( + readl(gicv3_data.dist_base + GICD_TYPER)); + if (gicv3_data.nr_spis > 1020) + gicv3_data.nr_spis = 1020; + + /* + * Initialize only the distributor for now. + * The redistributor and CPU interfaces are initialized + * later for every PE. 
+ */ + gicv3_dist_init(); +} + +const struct gic_common_ops gicv3_ops = { + .gic_init = gicv3_init, + .gic_cpu_init = gicv3_cpu_init, + .gic_irq_enable = gicv3_irq_enable, + .gic_irq_disable = gicv3_irq_disable, + .gic_read_iar = gicv3_read_iar, + .gic_write_eoir = gicv3_write_eoir, + .gic_write_dir = gicv3_write_dir, + .gic_set_priority_mask = gicv3_set_priority_mask, + .gic_set_eoi_split = gicv3_set_eoi_split, + .gic_set_priority = gicv3_set_priority, + .gic_irq_set_active = gicv3_irq_set_active, + .gic_irq_clear_active = gicv3_irq_clear_active, + .gic_irq_get_active = gicv3_irq_get_active, + .gic_irq_set_pending = gicv3_irq_set_pending, + .gic_irq_clear_pending = gicv3_irq_clear_pending, + .gic_irq_get_pending = gicv3_irq_get_pending, + .gic_irq_set_config = gicv3_irq_set_config, +}; diff --git a/tools/testing/selftests/kvm/lib/aarch64/handlers.S b/tools/testing/selftests/kvm/lib/aarch64/handlers.S new file mode 100644 index 0000000000..0e443eadfa --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/handlers.S @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +.macro save_registers + add sp, sp, #-16 * 17 + + stp x0, x1, [sp, #16 * 0] + stp x2, x3, [sp, #16 * 1] + stp x4, x5, [sp, #16 * 2] + stp x6, x7, [sp, #16 * 3] + stp x8, x9, [sp, #16 * 4] + stp x10, x11, [sp, #16 * 5] + stp x12, x13, [sp, #16 * 6] + stp x14, x15, [sp, #16 * 7] + stp x16, x17, [sp, #16 * 8] + stp x18, x19, [sp, #16 * 9] + stp x20, x21, [sp, #16 * 10] + stp x22, x23, [sp, #16 * 11] + stp x24, x25, [sp, #16 * 12] + stp x26, x27, [sp, #16 * 13] + stp x28, x29, [sp, #16 * 14] + + /* + * This stores sp_el1 into ex_regs.sp so exception handlers can "look" + * at it. It will _not_ be used to restore the sp on return from the + * exception so handlers can not update it. + */ + add x1, sp, #16 * 17 + stp x30, x1, [sp, #16 * 15] /* x30, SP */ + + mrs x1, elr_el1 + mrs x2, spsr_el1 + stp x1, x2, [sp, #16 * 16] /* PC, PSTATE */ +.endm + +.macro restore_registers + ldp x1, x2, [sp, #16 * 16] /* PC, PSTATE */ + msr elr_el1, x1 + msr spsr_el1, x2 + + /* sp is not restored */ + ldp x30, xzr, [sp, #16 * 15] /* x30, SP */ + + ldp x28, x29, [sp, #16 * 14] + ldp x26, x27, [sp, #16 * 13] + ldp x24, x25, [sp, #16 * 12] + ldp x22, x23, [sp, #16 * 11] + ldp x20, x21, [sp, #16 * 10] + ldp x18, x19, [sp, #16 * 9] + ldp x16, x17, [sp, #16 * 8] + ldp x14, x15, [sp, #16 * 7] + ldp x12, x13, [sp, #16 * 6] + ldp x10, x11, [sp, #16 * 5] + ldp x8, x9, [sp, #16 * 4] + ldp x6, x7, [sp, #16 * 3] + ldp x4, x5, [sp, #16 * 2] + ldp x2, x3, [sp, #16 * 1] + ldp x0, x1, [sp, #16 * 0] + + add sp, sp, #16 * 17 + + eret +.endm + +.pushsection ".entry.text", "ax" +.balign 0x800 +.global vectors +vectors: +.popsection + +.set vector, 0 + +/* + * Build an exception handler for vector and append a jump to it into + * vectors (while making sure that it's 0x80 aligned). + */ +.macro HANDLER, label +handler_\label: + save_registers + mov x0, sp + mov x1, #vector + bl route_exception + restore_registers + +.pushsection ".entry.text", "ax" +.balign 0x80 + b handler_\label +.popsection + +.set vector, vector + 1 +.endm + +.macro HANDLER_INVALID +.pushsection ".entry.text", "ax" +.balign 0x80 +/* This will abort so no need to save and restore registers. 
*/ + mov x0, #vector + mov x1, #0 /* ec */ + mov x2, #0 /* valid_ec */ + b kvm_exit_unexpected_exception +.popsection + +.set vector, vector + 1 +.endm + +/* + * Caution: be sure to not add anything between the declaration of vectors + * above and these macro calls that will build the vectors table below it. + */ + HANDLER_INVALID // Synchronous EL1t + HANDLER_INVALID // IRQ EL1t + HANDLER_INVALID // FIQ EL1t + HANDLER_INVALID // Error EL1t + + HANDLER el1h_sync // Synchronous EL1h + HANDLER el1h_irq // IRQ EL1h + HANDLER el1h_fiq // FIQ EL1h + HANDLER el1h_error // Error EL1h + + HANDLER el0_sync_64 // Synchronous 64-bit EL0 + HANDLER el0_irq_64 // IRQ 64-bit EL0 + HANDLER el0_fiq_64 // FIQ 64-bit EL0 + HANDLER el0_error_64 // Error 64-bit EL0 + + HANDLER el0_sync_32 // Synchronous 32-bit EL0 + HANDLER el0_irq_32 // IRQ 32-bit EL0 + HANDLER el0_fiq_32 // FIQ 32-bit EL0 + HANDLER el0_error_32 // Error 32-bit EL0 diff --git a/tools/testing/selftests/kvm/lib/aarch64/processor.c b/tools/testing/selftests/kvm/lib/aarch64/processor.c new file mode 100644 index 0000000000..3a0259e253 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/processor.c @@ -0,0 +1,586 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * AArch64 code + * + * Copyright (C) 2018, Red Hat, Inc. + */ + +#include <linux/compiler.h> +#include <assert.h> + +#include "guest_modes.h" +#include "kvm_util.h" +#include "processor.h" +#include <linux/bitfield.h> + +#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN 0xac0000 + +static vm_vaddr_t exception_handlers; + +static uint64_t page_align(struct kvm_vm *vm, uint64_t v) +{ + return (v + vm->page_size) & ~(vm->page_size - 1); +} + +static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva) +{ + unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; + uint64_t mask = (1UL << (vm->va_bits - shift)) - 1; + + return (gva >> shift) & mask; +} + +static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva) +{ + unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift; + uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + + TEST_ASSERT(vm->pgtable_levels == 4, + "Mode %d does not have 4 page table levels", vm->mode); + + return (gva >> shift) & mask; +} + +static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva) +{ + unsigned int shift = (vm->page_shift - 3) + vm->page_shift; + uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + + TEST_ASSERT(vm->pgtable_levels >= 3, + "Mode %d does not have >= 3 page table levels", vm->mode); + + return (gva >> shift) & mask; +} + +static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva) +{ + uint64_t mask = (1UL << (vm->page_shift - 3)) - 1; + return (gva >> vm->page_shift) & mask; +} + +static uint64_t addr_pte(struct kvm_vm *vm, uint64_t pa, uint64_t attrs) +{ + uint64_t pte; + + pte = pa & GENMASK(47, vm->page_shift); + if (vm->page_shift == 16) + pte |= FIELD_GET(GENMASK(51, 48), pa) << 12; + pte |= attrs; + + return pte; +} + +static uint64_t pte_addr(struct kvm_vm *vm, uint64_t pte) +{ + uint64_t pa; + + pa = pte & GENMASK(47, vm->page_shift); + if (vm->page_shift == 16) + pa |= FIELD_GET(GENMASK(15, 12), pte) << 48; + + return pa; +} + +static uint64_t ptrs_per_pgd(struct kvm_vm *vm) +{ + unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift; + return 1 << (vm->va_bits - shift); +} + +static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm) +{ + return 1 << (vm->page_shift - 3); +} + +void virt_arch_pgd_alloc(struct kvm_vm *vm) +{ + size_t nr_pages = 
page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size; + + if (vm->pgd_created) + return; + + vm->pgd = vm_phy_pages_alloc(vm, nr_pages, + KVM_GUEST_PAGE_TABLE_MIN_PADDR, + vm->memslots[MEM_REGION_PT]); + vm->pgd_created = true; +} + +static void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + uint64_t flags) +{ + uint8_t attr_idx = flags & 7; + uint64_t *ptep; + + TEST_ASSERT((vaddr % vm->page_size) == 0, + "Virtual address not on page boundary,\n" + " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, + (vaddr >> vm->page_shift)), + "Invalid virtual address, vaddr: 0x%lx", vaddr); + TEST_ASSERT((paddr % vm->page_size) == 0, + "Physical address not on page boundary,\n" + " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); + TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond beyond maximum supported,\n" + " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + paddr, vm->max_gfn, vm->page_size); + + ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8; + if (!*ptep) + *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3); + + switch (vm->pgtable_levels) { + case 4: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8; + if (!*ptep) + *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3); + /* fall through */ + case 3: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8; + if (!*ptep) + *ptep = addr_pte(vm, vm_alloc_page_table(vm), 3); + /* fall through */ + case 2: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8; + break; + default: + TEST_FAIL("Page table levels must be 2, 3, or 4"); + } + + *ptep = addr_pte(vm, paddr, (attr_idx << 2) | (1 << 10) | 3); /* AF */ +} + +void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +{ + uint64_t attr_idx = MT_NORMAL; + + _virt_pg_map(vm, vaddr, paddr, attr_idx); +} + +uint64_t *virt_get_pte_hva(struct kvm_vm *vm, vm_vaddr_t gva) +{ + uint64_t *ptep; + + if (!vm->pgd_created) + goto unmapped_gva; + + ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8; + if (!ptep) + goto unmapped_gva; + + switch (vm->pgtable_levels) { + case 4: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8; + if (!ptep) + goto unmapped_gva; + /* fall through */ + case 3: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8; + if (!ptep) + goto unmapped_gva; + /* fall through */ + case 2: + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8; + if (!ptep) + goto unmapped_gva; + break; + default: + TEST_FAIL("Page table levels must be 2, 3, or 4"); + } + + return ptep; + +unmapped_gva: + TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva); + exit(EXIT_FAILURE); +} + +vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +{ + uint64_t *ptep = virt_get_pte_hva(vm, gva); + + return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); +} + +static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level) +{ +#ifdef DEBUG + static const char * const type[] = { "", "pud", "pmd", "pte" }; + uint64_t pte, *ptep; + + if (level == 4) + return; + + for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) { + ptep = addr_gpa2hva(vm, pte); + if (!*ptep) + continue; + fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep); + pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1); + } +#endif +} + +void virt_arch_dump(FILE *stream, struct 
kvm_vm *vm, uint8_t indent) +{ + int level = 4 - (vm->pgtable_levels - 1); + uint64_t pgd, *ptep; + + if (!vm->pgd_created) + return; + + for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) { + ptep = addr_gpa2hva(vm, pgd); + if (!*ptep) + continue; + fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep); + pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level); + } +} + +void aarch64_vcpu_setup(struct kvm_vcpu *vcpu, struct kvm_vcpu_init *init) +{ + struct kvm_vcpu_init default_init = { .target = -1, }; + struct kvm_vm *vm = vcpu->vm; + uint64_t sctlr_el1, tcr_el1, ttbr0_el1; + + if (!init) + init = &default_init; + + if (init->target == -1) { + struct kvm_vcpu_init preferred; + vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred); + init->target = preferred.target; + } + + vcpu_ioctl(vcpu, KVM_ARM_VCPU_INIT, init); + + /* + * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15 + * registers, which the variable argument list macros do. + */ + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CPACR_EL1), 3 << 20); + + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), &sctlr_el1); + vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), &tcr_el1); + + /* Configure base granule size */ + switch (vm->mode) { + case VM_MODE_P52V48_4K: + TEST_FAIL("AArch64 does not support 4K sized pages " + "with 52-bit physical address ranges"); + case VM_MODE_PXXV48_4K: + TEST_FAIL("AArch64 does not support 4K sized pages " + "with ANY-bit physical address ranges"); + case VM_MODE_P52V48_64K: + case VM_MODE_P48V48_64K: + case VM_MODE_P40V48_64K: + case VM_MODE_P36V48_64K: + tcr_el1 |= 1ul << 14; /* TG0 = 64KB */ + break; + case VM_MODE_P48V48_16K: + case VM_MODE_P40V48_16K: + case VM_MODE_P36V48_16K: + case VM_MODE_P36V47_16K: + tcr_el1 |= 2ul << 14; /* TG0 = 16KB */ + break; + case VM_MODE_P48V48_4K: + case VM_MODE_P40V48_4K: + case VM_MODE_P36V48_4K: + tcr_el1 |= 0ul << 14; /* TG0 = 4KB */ + break; + default: + TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); + } + + ttbr0_el1 = vm->pgd & GENMASK(47, vm->page_shift); + + /* Configure output size */ + switch (vm->mode) { + case VM_MODE_P52V48_64K: + tcr_el1 |= 6ul << 32; /* IPS = 52 bits */ + ttbr0_el1 |= FIELD_GET(GENMASK(51, 48), vm->pgd) << 2; + break; + case VM_MODE_P48V48_4K: + case VM_MODE_P48V48_16K: + case VM_MODE_P48V48_64K: + tcr_el1 |= 5ul << 32; /* IPS = 48 bits */ + break; + case VM_MODE_P40V48_4K: + case VM_MODE_P40V48_16K: + case VM_MODE_P40V48_64K: + tcr_el1 |= 2ul << 32; /* IPS = 40 bits */ + break; + case VM_MODE_P36V48_4K: + case VM_MODE_P36V48_16K: + case VM_MODE_P36V48_64K: + case VM_MODE_P36V47_16K: + tcr_el1 |= 1ul << 32; /* IPS = 36 bits */ + break; + default: + TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); + } + + sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */; + /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */; + tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12); + tcr_el1 |= (64 - vm->va_bits) /* T0SZ */; + + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_SCTLR_EL1), sctlr_el1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TCR_EL1), tcr_el1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MAIR_EL1), DEFAULT_MAIR_EL1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TTBR0_EL1), ttbr0_el1); + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_TPIDR_EL1), vcpu->id); +} + +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +{ + uint64_t pstate, pc; + + vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pstate), &pstate); + vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc), &pc); + + 
fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n", + indent, "", pstate, pc); +} + +struct kvm_vcpu *aarch64_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, + struct kvm_vcpu_init *init, void *guest_code) +{ + size_t stack_size; + uint64_t stack_vaddr; + struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); + + stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size : + vm->page_size; + stack_vaddr = __vm_vaddr_alloc(vm, stack_size, + DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); + + aarch64_vcpu_setup(vcpu, init); + + vcpu_set_reg(vcpu, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size); + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code); + + return vcpu; +} + +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, + void *guest_code) +{ + return aarch64_vcpu_add(vm, vcpu_id, NULL, guest_code); +} + +void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) +{ + va_list ap; + int i; + + TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n" + " num: %u\n", num); + + va_start(ap, num); + + for (i = 0; i < num; i++) { + vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.regs[i]), + va_arg(ap, uint64_t)); + } + + va_end(ap); +} + +void kvm_exit_unexpected_exception(int vector, uint64_t ec, bool valid_ec) +{ + ucall(UCALL_UNHANDLED, 3, vector, ec, valid_ec); + while (1) + ; +} + +void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) +{ + struct ucall uc; + + if (get_ucall(vcpu, &uc) != UCALL_UNHANDLED) + return; + + if (uc.args[2]) /* valid_ec */ { + assert(VECTOR_IS_SYNC(uc.args[0])); + TEST_FAIL("Unexpected exception (vector:0x%lx, ec:0x%lx)", + uc.args[0], uc.args[1]); + } else { + assert(!VECTOR_IS_SYNC(uc.args[0])); + TEST_FAIL("Unexpected exception (vector:0x%lx)", + uc.args[0]); + } +} + +struct handlers { + handler_fn exception_handlers[VECTOR_NUM][ESR_EC_NUM]; +}; + +void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) +{ + extern char vectors; + + vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_VBAR_EL1), (uint64_t)&vectors); +} + +void route_exception(struct ex_regs *regs, int vector) +{ + struct handlers *handlers = (struct handlers *)exception_handlers; + bool valid_ec; + int ec = 0; + + switch (vector) { + case VECTOR_SYNC_CURRENT: + case VECTOR_SYNC_LOWER_64: + ec = (read_sysreg(esr_el1) >> ESR_EC_SHIFT) & ESR_EC_MASK; + valid_ec = true; + break; + case VECTOR_IRQ_CURRENT: + case VECTOR_IRQ_LOWER_64: + case VECTOR_FIQ_CURRENT: + case VECTOR_FIQ_LOWER_64: + case VECTOR_ERROR_CURRENT: + case VECTOR_ERROR_LOWER_64: + ec = 0; + valid_ec = false; + break; + default: + valid_ec = false; + goto unexpected_exception; + } + + if (handlers && handlers->exception_handlers[vector][ec]) + return handlers->exception_handlers[vector][ec](regs); + +unexpected_exception: + kvm_exit_unexpected_exception(vector, ec, valid_ec); +} + +void vm_init_descriptor_tables(struct kvm_vm *vm) +{ + vm->handlers = __vm_vaddr_alloc(vm, sizeof(struct handlers), + vm->page_size, MEM_REGION_DATA); + + *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; +} + +void vm_install_sync_handler(struct kvm_vm *vm, int vector, int ec, + void (*handler)(struct ex_regs *)) +{ + struct handlers *handlers = addr_gva2hva(vm, vm->handlers); + + assert(VECTOR_IS_SYNC(vector)); + assert(vector < VECTOR_NUM); + assert(ec < ESR_EC_NUM); + handlers->exception_handlers[vector][ec] = handler; +} + +void vm_install_exception_handler(struct kvm_vm *vm, int vector, + void (*handler)(struct ex_regs *)) +{ + struct handlers *handlers = 
addr_gva2hva(vm, vm->handlers); + + assert(!VECTOR_IS_SYNC(vector)); + assert(vector < VECTOR_NUM); + handlers->exception_handlers[vector][0] = handler; +} + +uint32_t guest_get_vcpuid(void) +{ + return read_sysreg(tpidr_el1); +} + +void aarch64_get_supported_page_sizes(uint32_t ipa, + bool *ps4k, bool *ps16k, bool *ps64k) +{ + struct kvm_vcpu_init preferred_init; + int kvm_fd, vm_fd, vcpu_fd, err; + uint64_t val; + struct kvm_one_reg reg = { + .id = KVM_ARM64_SYS_REG(SYS_ID_AA64MMFR0_EL1), + .addr = (uint64_t)&val, + }; + + kvm_fd = open_kvm_dev_path_or_exit(); + vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, (void *)(unsigned long)ipa); + TEST_ASSERT(vm_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm_fd)); + + vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0); + TEST_ASSERT(vcpu_fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu_fd)); + + err = ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &preferred_init); + TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_PREFERRED_TARGET, err)); + err = ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &preferred_init); + TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_ARM_VCPU_INIT, err)); + + err = ioctl(vcpu_fd, KVM_GET_ONE_REG, ®); + TEST_ASSERT(err == 0, KVM_IOCTL_ERROR(KVM_GET_ONE_REG, vcpu_fd)); + + *ps4k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN4), val) != 0xf; + *ps64k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN64), val) == 0; + *ps16k = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64MMFR0_TGRAN16), val) != 0; + + close(vcpu_fd); + close(vm_fd); + close(kvm_fd); +} + +#define __smccc_call(insn, function_id, arg0, arg1, arg2, arg3, arg4, arg5, \ + arg6, res) \ + asm volatile("mov w0, %w[function_id]\n" \ + "mov x1, %[arg0]\n" \ + "mov x2, %[arg1]\n" \ + "mov x3, %[arg2]\n" \ + "mov x4, %[arg3]\n" \ + "mov x5, %[arg4]\n" \ + "mov x6, %[arg5]\n" \ + "mov x7, %[arg6]\n" \ + #insn "#0\n" \ + "mov %[res0], x0\n" \ + "mov %[res1], x1\n" \ + "mov %[res2], x2\n" \ + "mov %[res3], x3\n" \ + : [res0] "=r"(res->a0), [res1] "=r"(res->a1), \ + [res2] "=r"(res->a2), [res3] "=r"(res->a3) \ + : [function_id] "r"(function_id), [arg0] "r"(arg0), \ + [arg1] "r"(arg1), [arg2] "r"(arg2), [arg3] "r"(arg3), \ + [arg4] "r"(arg4), [arg5] "r"(arg5), [arg6] "r"(arg6) \ + : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7") + + +void smccc_hvc(uint32_t function_id, uint64_t arg0, uint64_t arg1, + uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, + uint64_t arg6, struct arm_smccc_res *res) +{ + __smccc_call(hvc, function_id, arg0, arg1, arg2, arg3, arg4, arg5, + arg6, res); +} + +void smccc_smc(uint32_t function_id, uint64_t arg0, uint64_t arg1, + uint64_t arg2, uint64_t arg3, uint64_t arg4, uint64_t arg5, + uint64_t arg6, struct arm_smccc_res *res) +{ + __smccc_call(smc, function_id, arg0, arg1, arg2, arg3, arg4, arg5, + arg6, res); +} + +void kvm_selftest_arch_init(void) +{ + /* + * arm64 doesn't have a true default mode, so start by computing the + * available IPA space and page sizes early. + */ + guest_modes_append_default(); +} + +void vm_vaddr_populate_bitmap(struct kvm_vm *vm) +{ + /* + * arm64 selftests use only TTBR0_EL1, meaning that the valid VA space + * is [0, 2^(64 - TCR_EL1.T0SZ)). 
+ */ + sparsebit_set_num(vm->vpages_valid, 0, + (1ULL << vm->va_bits) >> vm->page_shift); +} diff --git a/tools/testing/selftests/kvm/lib/aarch64/spinlock.c b/tools/testing/selftests/kvm/lib/aarch64/spinlock.c new file mode 100644 index 0000000000..a076e780be --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/spinlock.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM64 Spinlock support + */ +#include <stdint.h> + +#include "spinlock.h" + +void spin_lock(struct spinlock *lock) +{ + int val, res; + + asm volatile( + "1: ldaxr %w0, [%2]\n" + " cbnz %w0, 1b\n" + " mov %w0, #1\n" + " stxr %w1, %w0, [%2]\n" + " cbnz %w1, 1b\n" + : "=&r" (val), "=&r" (res) + : "r" (&lock->v) + : "memory"); +} + +void spin_unlock(struct spinlock *lock) +{ + asm volatile("stlr wzr, [%0]\n" : : "r" (&lock->v) : "memory"); +} diff --git a/tools/testing/selftests/kvm/lib/aarch64/ucall.c b/tools/testing/selftests/kvm/lib/aarch64/ucall.c new file mode 100644 index 0000000000..ddab0ce89d --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/ucall.c @@ -0,0 +1,34 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ucall support. A ucall is a "hypercall to userspace". + * + * Copyright (C) 2018, Red Hat, Inc. + */ +#include "kvm_util.h" + +vm_vaddr_t *ucall_exit_mmio_addr; + +void ucall_arch_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +{ + vm_vaddr_t mmio_gva = vm_vaddr_unused_gap(vm, vm->page_size, KVM_UTIL_MIN_VADDR); + + virt_map(vm, mmio_gva, mmio_gpa, 1); + + vm->ucall_mmio_addr = mmio_gpa; + + write_guest_global(vm, ucall_exit_mmio_addr, (vm_vaddr_t *)mmio_gva); +} + +void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + if (run->exit_reason == KVM_EXIT_MMIO && + run->mmio.phys_addr == vcpu->vm->ucall_mmio_addr) { + TEST_ASSERT(run->mmio.is_write && run->mmio.len == sizeof(uint64_t), + "Unexpected ucall exit mmio address access"); + return (void *)(*((uint64_t *)run->mmio.data)); + } + + return NULL; +} diff --git a/tools/testing/selftests/kvm/lib/aarch64/vgic.c b/tools/testing/selftests/kvm/lib/aarch64/vgic.c new file mode 100644 index 0000000000..b5f28d21a9 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/aarch64/vgic.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ARM Generic Interrupt Controller (GIC) v3 host support + */ + +#include <linux/kvm.h> +#include <linux/sizes.h> +#include <asm/kvm_para.h> +#include <asm/kvm.h> + +#include "kvm_util.h" +#include "vgic.h" +#include "gic.h" +#include "gic_v3.h" + +/* + * vGIC-v3 default host setup + * + * Input args: + * vm - KVM VM + * nr_vcpus - Number of vCPUs supported by this VM + * gicd_base_gpa - Guest Physical Address of the Distributor region + * gicr_base_gpa - Guest Physical Address of the Redistributor region + * + * Output args: None + * + * Return: GIC file-descriptor or negative error code upon failure + * + * The function creates a vGIC-v3 device and maps the distributor and + * redistributor regions of the guest. Since it depends on the number of + * vCPUs for the VM, it must be called after all the vCPUs have been created. + */ +int vgic_v3_setup(struct kvm_vm *vm, unsigned int nr_vcpus, uint32_t nr_irqs, + uint64_t gicd_base_gpa, uint64_t gicr_base_gpa) +{ + int gic_fd; + uint64_t redist_attr; + struct list_head *iter; + unsigned int nr_gic_pages, nr_vcpus_created = 0; + + TEST_ASSERT(nr_vcpus, "Number of vCPUs cannot be empty\n"); + + /* + * Make sure that the caller is infact calling this + * function after all the vCPUs are added. 
+ */ + list_for_each(iter, &vm->vcpus) + nr_vcpus_created++; + TEST_ASSERT(nr_vcpus == nr_vcpus_created, + "Number of vCPUs requested (%u) doesn't match with the ones created for the VM (%u)\n", + nr_vcpus, nr_vcpus_created); + + /* Distributor setup */ + gic_fd = __kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3); + if (gic_fd < 0) + return gic_fd; + + kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_NR_IRQS, 0, &nr_irqs); + + kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); + + kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_DIST, &gicd_base_gpa); + nr_gic_pages = vm_calc_num_guest_pages(vm->mode, KVM_VGIC_V3_DIST_SIZE); + virt_map(vm, gicd_base_gpa, gicd_base_gpa, nr_gic_pages); + + /* Redistributor setup */ + redist_attr = REDIST_REGION_ATTR_ADDR(nr_vcpus, gicr_base_gpa, 0, 0); + kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_ADDR, + KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION, &redist_attr); + nr_gic_pages = vm_calc_num_guest_pages(vm->mode, + KVM_VGIC_V3_REDIST_SIZE * nr_vcpus); + virt_map(vm, gicr_base_gpa, gicr_base_gpa, nr_gic_pages); + + kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_CTRL, + KVM_DEV_ARM_VGIC_CTRL_INIT, NULL); + + return gic_fd; +} + +/* should only work for level sensitive interrupts */ +int _kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) +{ + uint64_t attr = 32 * (intid / 32); + uint64_t index = intid % 32; + uint64_t val; + int ret; + + ret = __kvm_device_attr_get(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, + attr, &val); + if (ret != 0) + return ret; + + val |= 1U << index; + ret = __kvm_device_attr_set(gic_fd, KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, + attr, &val); + return ret; +} + +void kvm_irq_set_level_info(int gic_fd, uint32_t intid, int level) +{ + int ret = _kvm_irq_set_level_info(gic_fd, intid, level); + + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO, ret)); +} + +int _kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) +{ + uint32_t irq = intid & KVM_ARM_IRQ_NUM_MASK; + + TEST_ASSERT(!INTID_IS_SGI(intid), "KVM_IRQ_LINE's interface itself " + "doesn't allow injecting SGIs. There's no mask for it."); + + if (INTID_IS_PPI(intid)) + irq |= KVM_ARM_IRQ_TYPE_PPI << KVM_ARM_IRQ_TYPE_SHIFT; + else + irq |= KVM_ARM_IRQ_TYPE_SPI << KVM_ARM_IRQ_TYPE_SHIFT; + + return _kvm_irq_line(vm, irq, level); +} + +void kvm_arm_irq_line(struct kvm_vm *vm, uint32_t intid, int level) +{ + int ret = _kvm_arm_irq_line(vm, intid, level); + + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret)); +} + +static void vgic_poke_irq(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu, + uint64_t reg_off) +{ + uint64_t reg = intid / 32; + uint64_t index = intid % 32; + uint64_t attr = reg_off + reg * 4; + uint64_t val; + bool intid_is_private = INTID_IS_SGI(intid) || INTID_IS_PPI(intid); + + uint32_t group = intid_is_private ? KVM_DEV_ARM_VGIC_GRP_REDIST_REGS + : KVM_DEV_ARM_VGIC_GRP_DIST_REGS; + + if (intid_is_private) { + /* TODO: only vcpu 0 implemented for now. */ + assert(vcpu->id == 0); + attr += SZ_64K; + } + + /* Check that the addr part of the attr is within 32 bits. */ + assert((attr & ~KVM_DEV_ARM_VGIC_OFFSET_MASK) == 0); + + /* + * All calls will succeed, even with invalid intid's, as long as the + * addr part of the attr is within 32 bits (checked above). An invalid + * intid will just make the read/writes point to above the intended + * register space (i.e., ICPENDR after ISPENDR). 
+ */ + kvm_device_attr_get(gic_fd, group, attr, &val); + val |= 1ULL << index; + kvm_device_attr_set(gic_fd, group, attr, &val); +} + +void kvm_irq_write_ispendr(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) +{ + vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISPENDR); +} + +void kvm_irq_write_isactiver(int gic_fd, uint32_t intid, struct kvm_vcpu *vcpu) +{ + vgic_poke_irq(gic_fd, intid, vcpu, GICD_ISACTIVER); +} diff --git a/tools/testing/selftests/kvm/lib/assert.c b/tools/testing/selftests/kvm/lib/assert.c new file mode 100644 index 0000000000..2bd25b191d --- /dev/null +++ b/tools/testing/selftests/kvm/lib/assert.c @@ -0,0 +1,99 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tools/testing/selftests/kvm/lib/assert.c + * + * Copyright (C) 2018, Google LLC. + */ + +#define _GNU_SOURCE /* for getline(3) and strchrnul(3)*/ + +#include "test_util.h" + +#include <execinfo.h> +#include <sys/syscall.h> + +#include "kselftest.h" + +/* Dumps the current stack trace to stderr. */ +static void __attribute__((noinline)) test_dump_stack(void); +static void test_dump_stack(void) +{ + /* + * Build and run this command: + * + * addr2line -s -e /proc/$PPID/exe -fpai {backtrace addresses} | \ + * cat -n 1>&2 + * + * Note that the spacing is different and there's no newline. + */ + size_t i; + size_t n = 20; + void *stack[n]; + const char *addr2line = "addr2line -s -e /proc/$PPID/exe -fpai"; + const char *pipeline = "|cat -n 1>&2"; + char cmd[strlen(addr2line) + strlen(pipeline) + + /* N bytes per addr * 2 digits per byte + 1 space per addr: */ + n * (((sizeof(void *)) * 2) + 1) + + /* Null terminator: */ + 1]; + char *c = cmd; + + n = backtrace(stack, n); + /* + * Skip the first 2 frames, which should be test_dump_stack() and + * test_assert(); both of which are declared noinline. Bail if the + * resulting stack trace would be empty. Otherwise, addr2line will block + * waiting for addresses to be passed in via stdin. + */ + if (n <= 2) { + fputs(" (stack trace empty)\n", stderr); + return; + } + + c += sprintf(c, "%s", addr2line); + for (i = 2; i < n; i++) + c += sprintf(c, " %lx", ((unsigned long) stack[i]) - 1); + + c += sprintf(c, "%s", pipeline); +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wunused-result" + system(cmd); +#pragma GCC diagnostic pop +} + +static pid_t _gettid(void) +{ + return syscall(SYS_gettid); +} + +void __attribute__((noinline)) +test_assert(bool exp, const char *exp_str, + const char *file, unsigned int line, const char *fmt, ...) +{ + va_list ap; + + if (!(exp)) { + va_start(ap, fmt); + + fprintf(stderr, "==== Test Assertion Failure ====\n" + " %s:%u: %s\n" + " pid=%d tid=%d errno=%d - %s\n", + file, line, exp_str, getpid(), _gettid(), + errno, strerror(errno)); + test_dump_stack(); + if (fmt) { + fputs(" ", stderr); + vfprintf(stderr, fmt, ap); + fputs("\n", stderr); + } + va_end(ap); + + if (errno == EACCES) { + print_skip("Access denied - Exiting"); + exit(KSFT_SKIP); + } + exit(254); + } + + return; +} diff --git a/tools/testing/selftests/kvm/lib/elf.c b/tools/testing/selftests/kvm/lib/elf.c new file mode 100644 index 0000000000..266f3876e1 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/elf.c @@ -0,0 +1,195 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tools/testing/selftests/kvm/lib/elf.c + * + * Copyright (C) 2018, Google LLC. 
+ */ + +#include "test_util.h" + +#include <bits/endian.h> +#include <linux/elf.h> + +#include "kvm_util.h" + +static void elfhdr_get(const char *filename, Elf64_Ehdr *hdrp) +{ + off_t offset_rv; + + /* Open the ELF file. */ + int fd; + fd = open(filename, O_RDONLY); + TEST_ASSERT(fd >= 0, "Failed to open ELF file,\n" + " filename: %s\n" + " rv: %i errno: %i", filename, fd, errno); + + /* Read in and validate ELF Identification Record. + * The ELF Identification record is the first 16 (EI_NIDENT) bytes + * of the ELF header, which is at the beginning of the ELF file. + * For now it is only safe to read the first EI_NIDENT bytes. Once + * read and validated, the value of e_ehsize can be used to determine + * the real size of the ELF header. + */ + unsigned char ident[EI_NIDENT]; + test_read(fd, ident, sizeof(ident)); + TEST_ASSERT((ident[EI_MAG0] == ELFMAG0) && (ident[EI_MAG1] == ELFMAG1) + && (ident[EI_MAG2] == ELFMAG2) && (ident[EI_MAG3] == ELFMAG3), + "ELF MAGIC Mismatch,\n" + " filename: %s\n" + " ident[EI_MAG0 - EI_MAG3]: %02x %02x %02x %02x\n" + " Expected: %02x %02x %02x %02x", + filename, + ident[EI_MAG0], ident[EI_MAG1], ident[EI_MAG2], ident[EI_MAG3], + ELFMAG0, ELFMAG1, ELFMAG2, ELFMAG3); + TEST_ASSERT(ident[EI_CLASS] == ELFCLASS64, + "Current implementation only able to handle ELFCLASS64,\n" + " filename: %s\n" + " ident[EI_CLASS]: %02x\n" + " expected: %02x", + filename, + ident[EI_CLASS], ELFCLASS64); + TEST_ASSERT(((BYTE_ORDER == LITTLE_ENDIAN) + && (ident[EI_DATA] == ELFDATA2LSB)) + || ((BYTE_ORDER == BIG_ENDIAN) + && (ident[EI_DATA] == ELFDATA2MSB)), "Current " + "implementation only able to handle\n" + "cases where the host and ELF file endianness\n" + "is the same:\n" + " host BYTE_ORDER: %u\n" + " host LITTLE_ENDIAN: %u\n" + " host BIG_ENDIAN: %u\n" + " ident[EI_DATA]: %u\n" + " ELFDATA2LSB: %u\n" + " ELFDATA2MSB: %u", + BYTE_ORDER, LITTLE_ENDIAN, BIG_ENDIAN, + ident[EI_DATA], ELFDATA2LSB, ELFDATA2MSB); + TEST_ASSERT(ident[EI_VERSION] == EV_CURRENT, + "Current implementation only able to handle current " + "ELF version,\n" + " filename: %s\n" + " ident[EI_VERSION]: %02x\n" + " expected: %02x", + filename, ident[EI_VERSION], EV_CURRENT); + + /* Read in the ELF header. + * With the ELF Identification portion of the ELF header + * validated, especially that the value at EI_VERSION is + * as expected, it is now safe to read the entire ELF header. + */ + offset_rv = lseek(fd, 0, SEEK_SET); + TEST_ASSERT(offset_rv == 0, "Seek to ELF header failed,\n" + " rv: %zi expected: %i", offset_rv, 0); + test_read(fd, hdrp, sizeof(*hdrp)); + TEST_ASSERT(hdrp->e_phentsize == sizeof(Elf64_Phdr), + "Unexpected physical header size,\n" + " hdrp->e_phentsize: %x\n" + " expected: %zx", + hdrp->e_phentsize, sizeof(Elf64_Phdr)); + TEST_ASSERT(hdrp->e_shentsize == sizeof(Elf64_Shdr), + "Unexpected section header size,\n" + " hdrp->e_shentsize: %x\n" + " expected: %zx", + hdrp->e_shentsize, sizeof(Elf64_Shdr)); + close(fd); +} + +/* VM ELF Load + * + * Input Args: + * filename - Path to ELF file + * + * Output Args: None + * + * Input/Output Args: + * vm - Pointer to opaque type that describes the VM. + * + * Return: None, TEST_ASSERT failures for all error conditions + * + * Loads the program image of the ELF file specified by filename, + * into the virtual address space of the VM pointed to by vm. 
On entry + * the VM needs to not be using any of the virtual address space used + * by the image and it needs to have sufficient available physical pages, to + * back the virtual pages used to load the image. + */ +void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename) +{ + off_t offset, offset_rv; + Elf64_Ehdr hdr; + + /* Open the ELF file. */ + int fd; + fd = open(filename, O_RDONLY); + TEST_ASSERT(fd >= 0, "Failed to open ELF file,\n" + " filename: %s\n" + " rv: %i errno: %i", filename, fd, errno); + + /* Read in the ELF header. */ + elfhdr_get(filename, &hdr); + + /* For each program header. + * The following ELF header members specify the location + * and size of the program headers: + * + * e_phoff - File offset to start of program headers + * e_phentsize - Size of each program header + * e_phnum - Number of program header entries + */ + for (unsigned int n1 = 0; n1 < hdr.e_phnum; n1++) { + /* Seek to the beginning of the program header. */ + offset = hdr.e_phoff + (n1 * hdr.e_phentsize); + offset_rv = lseek(fd, offset, SEEK_SET); + TEST_ASSERT(offset_rv == offset, + "Failed to seek to beginning of program header %u,\n" + " filename: %s\n" + " rv: %jd errno: %i", + n1, filename, (intmax_t) offset_rv, errno); + + /* Read in the program header. */ + Elf64_Phdr phdr; + test_read(fd, &phdr, sizeof(phdr)); + + /* Skip if this header doesn't describe a loadable segment. */ + if (phdr.p_type != PT_LOAD) + continue; + + /* Allocate memory for this segment within the VM. */ + TEST_ASSERT(phdr.p_memsz > 0, "Unexpected loadable segment " + "memsize of 0,\n" + " phdr index: %u p_memsz: 0x%" PRIx64, + n1, (uint64_t) phdr.p_memsz); + vm_vaddr_t seg_vstart = align_down(phdr.p_vaddr, vm->page_size); + vm_vaddr_t seg_vend = phdr.p_vaddr + phdr.p_memsz - 1; + seg_vend |= vm->page_size - 1; + size_t seg_size = seg_vend - seg_vstart + 1; + + vm_vaddr_t vaddr = __vm_vaddr_alloc(vm, seg_size, seg_vstart, + MEM_REGION_CODE); + TEST_ASSERT(vaddr == seg_vstart, "Unable to allocate " + "virtual memory for segment at requested min addr,\n" + " segment idx: %u\n" + " seg_vstart: 0x%lx\n" + " vaddr: 0x%lx", + n1, seg_vstart, vaddr); + memset(addr_gva2hva(vm, vaddr), 0, seg_size); + /* TODO(lhuemill): Set permissions of each memory segment + * based on the least-significant 3 bits of phdr.p_flags. + */ + + /* Load portion of initial state that is contained within + * the ELF file. + */ + if (phdr.p_filesz) { + offset_rv = lseek(fd, phdr.p_offset, SEEK_SET); + TEST_ASSERT(offset_rv == phdr.p_offset, + "Seek to program segment offset failed,\n" + " program header idx: %u errno: %i\n" + " offset_rv: 0x%jx\n" + " expected: 0x%jx\n", + n1, errno, (intmax_t) offset_rv, + (intmax_t) phdr.p_offset); + test_read(fd, addr_gva2hva(vm, phdr.p_vaddr), + phdr.p_filesz); + } + } + close(fd); +} diff --git a/tools/testing/selftests/kvm/lib/guest_modes.c b/tools/testing/selftests/kvm/lib/guest_modes.c new file mode 100644 index 0000000000..1df3ce4b16 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/guest_modes.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020, Red Hat, Inc. 
+ */ +#include "guest_modes.h" + +#ifdef __aarch64__ +#include "processor.h" +enum vm_guest_mode vm_mode_default; +#endif + +struct guest_mode guest_modes[NUM_VM_MODES]; + +void guest_modes_append_default(void) +{ +#ifndef __aarch64__ + guest_mode_append(VM_MODE_DEFAULT, true, true); +#else + { + unsigned int limit = kvm_check_cap(KVM_CAP_ARM_VM_IPA_SIZE); + bool ps4k, ps16k, ps64k; + int i; + + aarch64_get_supported_page_sizes(limit, &ps4k, &ps16k, &ps64k); + + vm_mode_default = NUM_VM_MODES; + + if (limit >= 52) + guest_mode_append(VM_MODE_P52V48_64K, ps64k, ps64k); + if (limit >= 48) { + guest_mode_append(VM_MODE_P48V48_4K, ps4k, ps4k); + guest_mode_append(VM_MODE_P48V48_16K, ps16k, ps16k); + guest_mode_append(VM_MODE_P48V48_64K, ps64k, ps64k); + } + if (limit >= 40) { + guest_mode_append(VM_MODE_P40V48_4K, ps4k, ps4k); + guest_mode_append(VM_MODE_P40V48_16K, ps16k, ps16k); + guest_mode_append(VM_MODE_P40V48_64K, ps64k, ps64k); + if (ps4k) + vm_mode_default = VM_MODE_P40V48_4K; + } + if (limit >= 36) { + guest_mode_append(VM_MODE_P36V48_4K, ps4k, ps4k); + guest_mode_append(VM_MODE_P36V48_16K, ps16k, ps16k); + guest_mode_append(VM_MODE_P36V48_64K, ps64k, ps64k); + guest_mode_append(VM_MODE_P36V47_16K, ps16k, ps16k); + } + + /* + * Pick the first supported IPA size if the default + * isn't available. + */ + for (i = 0; vm_mode_default == NUM_VM_MODES && i < NUM_VM_MODES; i++) { + if (guest_modes[i].supported && guest_modes[i].enabled) + vm_mode_default = i; + } + + TEST_ASSERT(vm_mode_default != NUM_VM_MODES, + "No supported mode!"); + } +#endif +#ifdef __s390x__ + { + int kvm_fd, vm_fd; + struct kvm_s390_vm_cpu_processor info; + + kvm_fd = open_kvm_dev_path_or_exit(); + vm_fd = __kvm_ioctl(kvm_fd, KVM_CREATE_VM, NULL); + kvm_device_attr_get(vm_fd, KVM_S390_VM_CPU_MODEL, + KVM_S390_VM_CPU_PROCESSOR, &info); + close(vm_fd); + close(kvm_fd); + /* Starting with z13 we have 47bits of physical address */ + if (info.ibc >= 0x30) + guest_mode_append(VM_MODE_P47V64_4K, true, true); + } +#endif +#ifdef __riscv + { + unsigned int sz = kvm_check_cap(KVM_CAP_VM_GPA_BITS); + + if (sz >= 52) + guest_mode_append(VM_MODE_P52V48_4K, true, true); + if (sz >= 48) + guest_mode_append(VM_MODE_P48V48_4K, true, true); + } +#endif +} + +void for_each_guest_mode(void (*func)(enum vm_guest_mode, void *), void *arg) +{ + int i; + + for (i = 0; i < NUM_VM_MODES; ++i) { + if (!guest_modes[i].enabled) + continue; + TEST_ASSERT(guest_modes[i].supported, + "Guest mode ID %d (%s) not supported.", + i, vm_guest_mode_string(i)); + func(i, arg); + } +} + +void guest_modes_help(void) +{ + int i; + + printf(" -m: specify the guest mode ID to test\n" + " (default: test all supported modes)\n" + " This option may be used multiple times.\n" + " Guest mode IDs:\n"); + for (i = 0; i < NUM_VM_MODES; ++i) { + printf(" %d: %s%s\n", i, vm_guest_mode_string(i), + guest_modes[i].supported ? 
" (supported)" : ""); + } +} + +void guest_modes_cmdline(const char *arg) +{ + static bool mode_selected; + unsigned int mode; + int i; + + if (!mode_selected) { + for (i = 0; i < NUM_VM_MODES; ++i) + guest_modes[i].enabled = false; + mode_selected = true; + } + + mode = atoi_non_negative("Guest mode ID", arg); + TEST_ASSERT(mode < NUM_VM_MODES, "Guest mode ID %d too big", mode); + guest_modes[mode].enabled = true; +} diff --git a/tools/testing/selftests/kvm/lib/guest_sprintf.c b/tools/testing/selftests/kvm/lib/guest_sprintf.c new file mode 100644 index 0000000000..74627514c4 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/guest_sprintf.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "test_util.h" +#include "kvm_util.h" +#include "ucall_common.h" + +#define APPEND_BUFFER_SAFE(str, end, v) \ +do { \ + GUEST_ASSERT(str < end); \ + *str++ = (v); \ +} while (0) + +static int isdigit(int ch) +{ + return (ch >= '0') && (ch <= '9'); +} + +static int skip_atoi(const char **s) +{ + int i = 0; + + while (isdigit(**s)) + i = i * 10 + *((*s)++) - '0'; + return i; +} + +#define ZEROPAD 1 /* pad with zero */ +#define SIGN 2 /* unsigned/signed long */ +#define PLUS 4 /* show plus */ +#define SPACE 8 /* space if plus */ +#define LEFT 16 /* left justified */ +#define SMALL 32 /* Must be 32 == 0x20 */ +#define SPECIAL 64 /* 0x */ + +#define __do_div(n, base) \ +({ \ + int __res; \ + \ + __res = ((uint64_t) n) % (uint32_t) base; \ + n = ((uint64_t) n) / (uint32_t) base; \ + __res; \ +}) + +static char *number(char *str, const char *end, long num, int base, int size, + int precision, int type) +{ + /* we are called with base 8, 10 or 16, only, thus don't need "G..." */ + static const char digits[16] = "0123456789ABCDEF"; /* "GHIJKLMNOPQRSTUVWXYZ"; */ + + char tmp[66]; + char c, sign, locase; + int i; + + /* + * locase = 0 or 0x20. ORing digits or letters with 'locase' + * produces same digits or (maybe lowercased) letters + */ + locase = (type & SMALL); + if (type & LEFT) + type &= ~ZEROPAD; + if (base < 2 || base > 16) + return NULL; + c = (type & ZEROPAD) ? '0' : ' '; + sign = 0; + if (type & SIGN) { + if (num < 0) { + sign = '-'; + num = -num; + size--; + } else if (type & PLUS) { + sign = '+'; + size--; + } else if (type & SPACE) { + sign = ' '; + size--; + } + } + if (type & SPECIAL) { + if (base == 16) + size -= 2; + else if (base == 8) + size--; + } + i = 0; + if (num == 0) + tmp[i++] = '0'; + else + while (num != 0) + tmp[i++] = (digits[__do_div(num, base)] | locase); + if (i > precision) + precision = i; + size -= precision; + if (!(type & (ZEROPAD + LEFT))) + while (size-- > 0) + APPEND_BUFFER_SAFE(str, end, ' '); + if (sign) + APPEND_BUFFER_SAFE(str, end, sign); + if (type & SPECIAL) { + if (base == 8) + APPEND_BUFFER_SAFE(str, end, '0'); + else if (base == 16) { + APPEND_BUFFER_SAFE(str, end, '0'); + APPEND_BUFFER_SAFE(str, end, 'x'); + } + } + if (!(type & LEFT)) + while (size-- > 0) + APPEND_BUFFER_SAFE(str, end, c); + while (i < precision--) + APPEND_BUFFER_SAFE(str, end, '0'); + while (i-- > 0) + APPEND_BUFFER_SAFE(str, end, tmp[i]); + while (size-- > 0) + APPEND_BUFFER_SAFE(str, end, ' '); + + return str; +} + +int guest_vsnprintf(char *buf, int n, const char *fmt, va_list args) +{ + char *str, *end; + const char *s; + uint64_t num; + int i, base; + int len; + + int flags; /* flags to number() */ + + int field_width; /* width of output field */ + int precision; /* + * min. 
# of digits for integers; max + * number of chars for from string + */ + int qualifier; /* 'h', 'l', or 'L' for integer fields */ + + end = buf + n; + GUEST_ASSERT(buf < end); + GUEST_ASSERT(n > 0); + + for (str = buf; *fmt; ++fmt) { + if (*fmt != '%') { + APPEND_BUFFER_SAFE(str, end, *fmt); + continue; + } + + /* process flags */ + flags = 0; +repeat: + ++fmt; /* this also skips first '%' */ + switch (*fmt) { + case '-': + flags |= LEFT; + goto repeat; + case '+': + flags |= PLUS; + goto repeat; + case ' ': + flags |= SPACE; + goto repeat; + case '#': + flags |= SPECIAL; + goto repeat; + case '0': + flags |= ZEROPAD; + goto repeat; + } + + /* get field width */ + field_width = -1; + if (isdigit(*fmt)) + field_width = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + field_width = va_arg(args, int); + if (field_width < 0) { + field_width = -field_width; + flags |= LEFT; + } + } + + /* get the precision */ + precision = -1; + if (*fmt == '.') { + ++fmt; + if (isdigit(*fmt)) + precision = skip_atoi(&fmt); + else if (*fmt == '*') { + ++fmt; + /* it's the next argument */ + precision = va_arg(args, int); + } + if (precision < 0) + precision = 0; + } + + /* get the conversion qualifier */ + qualifier = -1; + if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L') { + qualifier = *fmt; + ++fmt; + } + + /* + * Play nice with %llu, %llx, etc. KVM selftests only support + * 64-bit builds, so just treat %ll* the same as %l*. + */ + if (qualifier == 'l' && *fmt == 'l') + ++fmt; + + /* default base */ + base = 10; + + switch (*fmt) { + case 'c': + if (!(flags & LEFT)) + while (--field_width > 0) + APPEND_BUFFER_SAFE(str, end, ' '); + APPEND_BUFFER_SAFE(str, end, + (uint8_t)va_arg(args, int)); + while (--field_width > 0) + APPEND_BUFFER_SAFE(str, end, ' '); + continue; + + case 's': + s = va_arg(args, char *); + len = strnlen(s, precision); + + if (!(flags & LEFT)) + while (len < field_width--) + APPEND_BUFFER_SAFE(str, end, ' '); + for (i = 0; i < len; ++i) + APPEND_BUFFER_SAFE(str, end, *s++); + while (len < field_width--) + APPEND_BUFFER_SAFE(str, end, ' '); + continue; + + case 'p': + if (field_width == -1) { + field_width = 2 * sizeof(void *); + flags |= SPECIAL | SMALL | ZEROPAD; + } + str = number(str, end, + (uint64_t)va_arg(args, void *), 16, + field_width, precision, flags); + continue; + + case 'n': + if (qualifier == 'l') { + long *ip = va_arg(args, long *); + *ip = (str - buf); + } else { + int *ip = va_arg(args, int *); + *ip = (str - buf); + } + continue; + + case '%': + APPEND_BUFFER_SAFE(str, end, '%'); + continue; + + /* integer number formats - set up the flags and "break" */ + case 'o': + base = 8; + break; + + case 'x': + flags |= SMALL; + case 'X': + base = 16; + break; + + case 'd': + case 'i': + flags |= SIGN; + case 'u': + break; + + default: + APPEND_BUFFER_SAFE(str, end, '%'); + if (*fmt) + APPEND_BUFFER_SAFE(str, end, *fmt); + else + --fmt; + continue; + } + if (qualifier == 'l') + num = va_arg(args, uint64_t); + else if (qualifier == 'h') { + num = (uint16_t)va_arg(args, int); + if (flags & SIGN) + num = (int16_t)num; + } else if (flags & SIGN) + num = va_arg(args, int); + else + num = va_arg(args, uint32_t); + str = number(str, end, num, base, field_width, precision, flags); + } + + GUEST_ASSERT(str < end); + *str = '\0'; + return str - buf; +} + +int guest_snprintf(char *buf, int n, const char *fmt, ...) 
+{ + va_list va; + int len; + + va_start(va, fmt); + len = guest_vsnprintf(buf, n, fmt, va); + va_end(va); + + return len; +} diff --git a/tools/testing/selftests/kvm/lib/io.c b/tools/testing/selftests/kvm/lib/io.c new file mode 100644 index 0000000000..fedb2a741f --- /dev/null +++ b/tools/testing/selftests/kvm/lib/io.c @@ -0,0 +1,157 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tools/testing/selftests/kvm/lib/io.c + * + * Copyright (C) 2018, Google LLC. + */ + +#include "test_util.h" + +/* Test Write + * + * A wrapper for write(2), that automatically handles the following + * special conditions: + * + * + Interrupted system call (EINTR) + * + Write of less than requested amount + * + Non-block return (EAGAIN) + * + * For each of the above, an additional write is performed to automatically + * continue writing the requested data. + * There are also many cases where write(2) can return an unexpected + * error (e.g. EIO). Such errors cause a TEST_ASSERT failure. + * + * Note, for function signature compatibility with write(2), this function + * returns the number of bytes written, but that value will always be equal + * to the number of requested bytes. All other conditions in this and + * future enhancements to this function either automatically issue another + * write(2) or cause a TEST_ASSERT failure. + * + * Args: + * fd - Opened file descriptor to file to be written. + * count - Number of bytes to write. + * + * Output: + * buf - Starting address of data to be written. + * + * Return: + * On success, number of bytes written. + * On failure, a TEST_ASSERT failure is caused. + */ +ssize_t test_write(int fd, const void *buf, size_t count) +{ + ssize_t rc; + ssize_t num_written = 0; + size_t num_left = count; + const char *ptr = buf; + + /* Note: Count of zero is allowed (see "RETURN VALUE" portion of + * write(2) manpage for details. + */ + TEST_ASSERT(count >= 0, "Unexpected count, count: %li", count); + + do { + rc = write(fd, ptr, num_left); + + switch (rc) { + case -1: + TEST_ASSERT(errno == EAGAIN || errno == EINTR, + "Unexpected write failure,\n" + " rc: %zi errno: %i", rc, errno); + continue; + + case 0: + TEST_FAIL("Unexpected EOF,\n" + " rc: %zi num_written: %zi num_left: %zu", + rc, num_written, num_left); + break; + + default: + TEST_ASSERT(rc >= 0, "Unexpected ret from write,\n" + " rc: %zi errno: %i", rc, errno); + num_written += rc; + num_left -= rc; + ptr += rc; + break; + } + } while (num_written < count); + + return num_written; +} + +/* Test Read + * + * A wrapper for read(2), that automatically handles the following + * special conditions: + * + * + Interrupted system call (EINTR) + * + Read of less than requested amount + * + Non-block return (EAGAIN) + * + * For each of the above, an additional read is performed to automatically + * continue reading the requested data. + * There are also many cases where read(2) can return an unexpected + * error (e.g. EIO). Such errors cause a TEST_ASSERT failure. Note, + * it is expected that the file opened by fd at the current file position + * contains at least the number of requested bytes to be read. A TEST_ASSERT + * failure is produced if an End-Of-File condition occurs, before all the + * data is read. It is the callers responsibility to assure that sufficient + * data exists. + * + * Note, for function signature compatibility with read(2), this function + * returns the number of bytes read, but that value will always be equal + * to the number of requested bytes. 
All other conditions in this and + * future enhancements to this function either automatically issue another + * read(2) or cause a TEST_ASSERT failure. + * + * Args: + * fd - Opened file descriptor to file to be read. + * count - Number of bytes to read. + * + * Output: + * buf - Starting address of where to write the bytes read. + * + * Return: + * On success, number of bytes read. + * On failure, a TEST_ASSERT failure is caused. + */ +ssize_t test_read(int fd, void *buf, size_t count) +{ + ssize_t rc; + ssize_t num_read = 0; + size_t num_left = count; + char *ptr = buf; + + /* Note: Count of zero is allowed (see "If count is zero" portion of + * read(2) manpage for details. + */ + TEST_ASSERT(count >= 0, "Unexpected count, count: %li", count); + + do { + rc = read(fd, ptr, num_left); + + switch (rc) { + case -1: + TEST_ASSERT(errno == EAGAIN || errno == EINTR, + "Unexpected read failure,\n" + " rc: %zi errno: %i", rc, errno); + break; + + case 0: + TEST_FAIL("Unexpected EOF,\n" + " rc: %zi num_read: %zi num_left: %zu", + rc, num_read, num_left); + break; + + default: + TEST_ASSERT(rc > 0, "Unexpected ret from read,\n" + " rc: %zi errno: %i", rc, errno); + num_read += rc; + num_left -= rc; + ptr += rc; + break; + } + } while (num_read < count); + + return num_read; +} diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c new file mode 100644 index 0000000000..7a8af1821f --- /dev/null +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -0,0 +1,2178 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tools/testing/selftests/kvm/lib/kvm_util.c + * + * Copyright (C) 2018, Google LLC. + */ + +#define _GNU_SOURCE /* for program_invocation_name */ +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#include <assert.h> +#include <sched.h> +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> +#include <linux/kernel.h> + +#define KVM_UTIL_MIN_PFN 2 + +static int vcpu_mmap_sz(void); + +int open_path_or_exit(const char *path, int flags) +{ + int fd; + + fd = open(path, flags); + __TEST_REQUIRE(fd >= 0, "%s not available (errno: %d)", path, errno); + + return fd; +} + +/* + * Open KVM_DEV_PATH if available, otherwise exit the entire program. + * + * Input Args: + * flags - The flags to pass when opening KVM_DEV_PATH. + * + * Return: + * The opened file descriptor of /dev/kvm. 
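+ *
+ * Typical use is via the public open_kvm_dev_path_or_exit() wrapper, as
+ * in kvm_check_cap() below:
+ *
+ *	kvm_fd = open_kvm_dev_path_or_exit();
+ *	ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap);
+ *	close(kvm_fd);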
+ */ +static int _open_kvm_dev_path_or_exit(int flags) +{ + return open_path_or_exit(KVM_DEV_PATH, flags); +} + +int open_kvm_dev_path_or_exit(void) +{ + return _open_kvm_dev_path_or_exit(O_RDONLY); +} + +static bool get_module_param_bool(const char *module_name, const char *param) +{ + const int path_size = 128; + char path[path_size]; + char value; + ssize_t r; + int fd; + + r = snprintf(path, path_size, "/sys/module/%s/parameters/%s", + module_name, param); + TEST_ASSERT(r < path_size, + "Failed to construct sysfs path in %d bytes.", path_size); + + fd = open_path_or_exit(path, O_RDONLY); + + r = read(fd, &value, 1); + TEST_ASSERT(r == 1, "read(%s) failed", path); + + r = close(fd); + TEST_ASSERT(!r, "close(%s) failed", path); + + if (value == 'Y') + return true; + else if (value == 'N') + return false; + + TEST_FAIL("Unrecognized value '%c' for boolean module param", value); +} + +bool get_kvm_param_bool(const char *param) +{ + return get_module_param_bool("kvm", param); +} + +bool get_kvm_intel_param_bool(const char *param) +{ + return get_module_param_bool("kvm_intel", param); +} + +bool get_kvm_amd_param_bool(const char *param) +{ + return get_module_param_bool("kvm_amd", param); +} + +/* + * Capability + * + * Input Args: + * cap - Capability + * + * Output Args: None + * + * Return: + * On success, the Value corresponding to the capability (KVM_CAP_*) + * specified by the value of cap. On failure a TEST_ASSERT failure + * is produced. + * + * Looks up and returns the value corresponding to the capability + * (KVM_CAP_*) given by cap. + */ +unsigned int kvm_check_cap(long cap) +{ + int ret; + int kvm_fd; + + kvm_fd = open_kvm_dev_path_or_exit(); + ret = __kvm_ioctl(kvm_fd, KVM_CHECK_EXTENSION, (void *)cap); + TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_CHECK_EXTENSION, ret)); + + close(kvm_fd); + + return (unsigned int)ret; +} + +void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size) +{ + if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) + vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); + else + vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); + vm->dirty_ring_size = ring_size; +} + +static void vm_open(struct kvm_vm *vm) +{ + vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); + + TEST_REQUIRE(kvm_has_cap(KVM_CAP_IMMEDIATE_EXIT)); + + vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); + TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); +} + +const char *vm_guest_mode_string(uint32_t i) +{ + static const char * const strings[] = { + [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages", + [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages", + [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages", + [VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages", + [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages", + [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages", + [VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages", + [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages", + [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages", + [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages", + [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages", + [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages", + [VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages", + [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages", + [VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages", + }; + _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES, + "Missing new mode 
strings?"); + + TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i); + + return strings[i]; +} + +const struct vm_guest_mode_params vm_guest_mode_params[] = { + [VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 }, + [VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 }, + [VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 }, + [VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 }, + [VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 }, + [VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 }, + [VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 }, + [VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 }, + [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 }, + [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 }, + [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 }, + [VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 }, + [VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 }, + [VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 }, + [VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 }, +}; +_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES, + "Missing new mode params?"); + +/* + * Initializes vm->vpages_valid to match the canonical VA space of the + * architecture. + * + * The default implementation is valid for architectures which split the + * range addressed by a single page table into a low and high region + * based on the MSB of the VA. On architectures with this behavior + * the VA region spans [0, 2^(va_bits - 1)), [-(2^(va_bits - 1), -1]. + */ +__weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) +{ + sparsebit_set_num(vm->vpages_valid, + 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); + sparsebit_set_num(vm->vpages_valid, + (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, + (1ULL << (vm->va_bits - 1)) >> vm->page_shift); +} + +struct kvm_vm *____vm_create(enum vm_guest_mode mode) +{ + struct kvm_vm *vm; + + vm = calloc(1, sizeof(*vm)); + TEST_ASSERT(vm != NULL, "Insufficient Memory"); + + INIT_LIST_HEAD(&vm->vcpus); + vm->regions.gpa_tree = RB_ROOT; + vm->regions.hva_tree = RB_ROOT; + hash_init(vm->regions.slot_hash); + + vm->mode = mode; + vm->type = 0; + + vm->pa_bits = vm_guest_mode_params[mode].pa_bits; + vm->va_bits = vm_guest_mode_params[mode].va_bits; + vm->page_size = vm_guest_mode_params[mode].page_size; + vm->page_shift = vm_guest_mode_params[mode].page_shift; + + /* Setup mode specific traits. */ + switch (vm->mode) { + case VM_MODE_P52V48_4K: + vm->pgtable_levels = 4; + break; + case VM_MODE_P52V48_64K: + vm->pgtable_levels = 3; + break; + case VM_MODE_P48V48_4K: + vm->pgtable_levels = 4; + break; + case VM_MODE_P48V48_64K: + vm->pgtable_levels = 3; + break; + case VM_MODE_P40V48_4K: + case VM_MODE_P36V48_4K: + vm->pgtable_levels = 4; + break; + case VM_MODE_P40V48_64K: + case VM_MODE_P36V48_64K: + vm->pgtable_levels = 3; + break; + case VM_MODE_P48V48_16K: + case VM_MODE_P40V48_16K: + case VM_MODE_P36V48_16K: + vm->pgtable_levels = 4; + break; + case VM_MODE_P36V47_16K: + vm->pgtable_levels = 3; + break; + case VM_MODE_PXXV48_4K: +#ifdef __x86_64__ + kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); + /* + * Ignore KVM support for 5-level paging (vm->va_bits == 57), + * it doesn't take effect unless a CR4.LA57 is set, which it + * isn't for this VM_MODE. 
+ */ + TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, + "Linear address width (%d bits) not supported", + vm->va_bits); + pr_debug("Guest physical address width detected: %d\n", + vm->pa_bits); + vm->pgtable_levels = 4; + vm->va_bits = 48; +#else + TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms"); +#endif + break; + case VM_MODE_P47V64_4K: + vm->pgtable_levels = 5; + break; + case VM_MODE_P44V64_4K: + vm->pgtable_levels = 5; + break; + default: + TEST_FAIL("Unknown guest mode, mode: 0x%x", mode); + } + +#ifdef __aarch64__ + if (vm->pa_bits != 40) + vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); +#endif + + vm_open(vm); + + /* Limit to VA-bit canonical virtual addresses. */ + vm->vpages_valid = sparsebit_alloc(); + vm_vaddr_populate_bitmap(vm); + + /* Limit physical addresses to PA-bits. */ + vm->max_gfn = vm_compute_max_gfn(vm); + + /* Allocate and setup memory for guest. */ + vm->vpages_mapped = sparsebit_alloc(); + + return vm; +} + +static uint64_t vm_nr_pages_required(enum vm_guest_mode mode, + uint32_t nr_runnable_vcpus, + uint64_t extra_mem_pages) +{ + uint64_t page_size = vm_guest_mode_params[mode].page_size; + uint64_t nr_pages; + + TEST_ASSERT(nr_runnable_vcpus, + "Use vm_create_barebones() for VMs that _never_ have vCPUs\n"); + + TEST_ASSERT(nr_runnable_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS), + "nr_vcpus = %d too large for host, max-vcpus = %d", + nr_runnable_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS)); + + /* + * Arbitrarily allocate 512 pages (2mb when page size is 4kb) for the + * test code and other per-VM assets that will be loaded into memslot0. + */ + nr_pages = 512; + + /* Account for the per-vCPU stacks on behalf of the test. */ + nr_pages += nr_runnable_vcpus * DEFAULT_STACK_PGS; + + /* + * Account for the number of pages needed for the page tables. The + * maximum page table size for a memory region will be when the + * smallest page size is used. Considering each page contains x page + * table descriptors, the total extra size for page tables (for extra + * N pages) will be: N/x+N/x^2+N/x^3+... which is definitely smaller + * than N/x*2. + */ + nr_pages += (nr_pages + extra_mem_pages) / PTES_PER_MIN_PAGE * 2; + + /* Account for the number of pages needed by ucall. */ + nr_pages += ucall_nr_pages_required(page_size); + + return vm_adjust_num_guest_pages(mode, nr_pages); +} + +struct kvm_vm *__vm_create(enum vm_guest_mode mode, uint32_t nr_runnable_vcpus, + uint64_t nr_extra_pages) +{ + uint64_t nr_pages = vm_nr_pages_required(mode, nr_runnable_vcpus, + nr_extra_pages); + struct userspace_mem_region *slot0; + struct kvm_vm *vm; + int i; + + pr_debug("%s: mode='%s' pages='%ld'\n", __func__, + vm_guest_mode_string(mode), nr_pages); + + vm = ____vm_create(mode); + + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0); + for (i = 0; i < NR_MEM_REGIONS; i++) + vm->memslots[i] = 0; + + kvm_vm_elf_load(vm, program_invocation_name); + + /* + * TODO: Add proper defines to protect the library's memslots, and then + * carve out memslot1 for the ucall MMIO address. KVM treats writes to + * read-only memslots as MMIO, and creating a read-only memslot for the + * MMIO region would prevent silently clobbering the MMIO region. + */ + slot0 = memslot2region(vm, 0); + ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); + + kvm_arch_vm_post_create(vm); + + return vm; +} + +/* + * VM Create with customized parameters + * + * Input Args: + * mode - VM Mode (e.g. 
VM_MODE_P52V48_4K) + * nr_vcpus - VCPU count + * extra_mem_pages - Non-slot0 physical memory total size + * guest_code - Guest entry point + * vcpuids - VCPU IDs + * + * Output Args: None + * + * Return: + * Pointer to opaque structure that describes the created VM. + * + * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K). + * extra_mem_pages is only used to calculate the maximum page table size, + * no real memory allocation for non-slot0 memory in this function. + */ +struct kvm_vm *__vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus, + uint64_t extra_mem_pages, + void *guest_code, struct kvm_vcpu *vcpus[]) +{ + struct kvm_vm *vm; + int i; + + TEST_ASSERT(!nr_vcpus || vcpus, "Must provide vCPU array"); + + vm = __vm_create(mode, nr_vcpus, extra_mem_pages); + + for (i = 0; i < nr_vcpus; ++i) + vcpus[i] = vm_vcpu_add(vm, i, guest_code); + + return vm; +} + +struct kvm_vm *__vm_create_with_one_vcpu(struct kvm_vcpu **vcpu, + uint64_t extra_mem_pages, + void *guest_code) +{ + struct kvm_vcpu *vcpus[1]; + struct kvm_vm *vm; + + vm = __vm_create_with_vcpus(VM_MODE_DEFAULT, 1, extra_mem_pages, + guest_code, vcpus); + + *vcpu = vcpus[0]; + return vm; +} + +/* + * VM Restart + * + * Input Args: + * vm - VM that has been released before + * + * Output Args: None + * + * Reopens the file descriptors associated to the VM and reinstates the + * global state, such as the irqchip and the memory regions that are mapped + * into the guest. + */ +void kvm_vm_restart(struct kvm_vm *vmp) +{ + int ctr; + struct userspace_mem_region *region; + + vm_open(vmp); + if (vmp->has_irqchip) + vm_create_irqchip(vmp); + + hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) { + int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region); + TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n" + " rc: %i errno: %i\n" + " slot: %u flags: 0x%x\n" + " guest_phys_addr: 0x%llx size: 0x%llx", + ret, errno, region->region.slot, + region->region.flags, + region->region.guest_phys_addr, + region->region.memory_size); + } +} + +__weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, + uint32_t vcpu_id) +{ + return __vm_vcpu_add(vm, vcpu_id); +} + +struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm) +{ + kvm_vm_restart(vm); + + return vm_vcpu_recreate(vm, 0); +} + +void kvm_pin_this_task_to_pcpu(uint32_t pcpu) +{ + cpu_set_t mask; + int r; + + CPU_ZERO(&mask); + CPU_SET(pcpu, &mask); + r = sched_setaffinity(0, sizeof(mask), &mask); + TEST_ASSERT(!r, "sched_setaffinity() failed for pCPU '%u'.\n", pcpu); +} + +static uint32_t parse_pcpu(const char *cpu_str, const cpu_set_t *allowed_mask) +{ + uint32_t pcpu = atoi_non_negative("CPU number", cpu_str); + + TEST_ASSERT(CPU_ISSET(pcpu, allowed_mask), + "Not allowed to run on pCPU '%d', check cgroups?\n", pcpu); + return pcpu; +} + +void kvm_print_vcpu_pinning_help(void) +{ + const char *name = program_invocation_name; + + printf(" -c: Pin tasks to physical CPUs. Takes a list of comma separated\n" + " values (target pCPU), one for each vCPU, plus an optional\n" + " entry for the main application task (specified via entry\n" + " <nr_vcpus + 1>). If used, entries must be provided for all\n" + " vCPUs, i.e. pinning vCPUs is all or nothing.\n\n" + " E.g. 
to create 3 vCPUs, pin vCPU0=>pCPU22, vCPU1=>pCPU23,\n" + " vCPU2=>pCPU24, and pin the application task to pCPU50:\n\n" + " %s -v 3 -c 22,23,24,50\n\n" + " To leave the application task unpinned, drop the final entry:\n\n" + " %s -v 3 -c 22,23,24\n\n" + " (default: no pinning)\n", name, name); +} + +void kvm_parse_vcpu_pinning(const char *pcpus_string, uint32_t vcpu_to_pcpu[], + int nr_vcpus) +{ + cpu_set_t allowed_mask; + char *cpu, *cpu_list; + char delim[2] = ","; + int i, r; + + cpu_list = strdup(pcpus_string); + TEST_ASSERT(cpu_list, "strdup() allocation failed.\n"); + + r = sched_getaffinity(0, sizeof(allowed_mask), &allowed_mask); + TEST_ASSERT(!r, "sched_getaffinity() failed"); + + cpu = strtok(cpu_list, delim); + + /* 1. Get all pcpus for vcpus. */ + for (i = 0; i < nr_vcpus; i++) { + TEST_ASSERT(cpu, "pCPU not provided for vCPU '%d'\n", i); + vcpu_to_pcpu[i] = parse_pcpu(cpu, &allowed_mask); + cpu = strtok(NULL, delim); + } + + /* 2. Check if the main worker needs to be pinned. */ + if (cpu) { + kvm_pin_this_task_to_pcpu(parse_pcpu(cpu, &allowed_mask)); + cpu = strtok(NULL, delim); + } + + TEST_ASSERT(!cpu, "pCPU list contains trailing garbage characters '%s'", cpu); + free(cpu_list); +} + +/* + * Userspace Memory Region Find + * + * Input Args: + * vm - Virtual Machine + * start - Starting VM physical address + * end - Ending VM physical address, inclusive. + * + * Output Args: None + * + * Return: + * Pointer to overlapping region, NULL if no such region. + * + * Searches for a region with any physical memory that overlaps with + * any portion of the guest physical addresses from start to end + * inclusive. If multiple overlapping regions exist, a pointer to any + * of the regions is returned. Null is returned only when no overlapping + * region exists. + */ +static struct userspace_mem_region * +userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) +{ + struct rb_node *node; + + for (node = vm->regions.gpa_tree.rb_node; node; ) { + struct userspace_mem_region *region = + container_of(node, struct userspace_mem_region, gpa_node); + uint64_t existing_start = region->region.guest_phys_addr; + uint64_t existing_end = region->region.guest_phys_addr + + region->region.memory_size - 1; + if (start <= existing_end && end >= existing_start) + return region; + + if (start < existing_start) + node = node->rb_left; + else + node = node->rb_right; + } + + return NULL; +} + +/* + * KVM Userspace Memory Region Find + * + * Input Args: + * vm - Virtual Machine + * start - Starting VM physical address + * end - Ending VM physical address, inclusive. + * + * Output Args: None + * + * Return: + * Pointer to overlapping region, NULL if no such region. + * + * Public interface to userspace_mem_region_find. Allows tests to look up + * the memslot datastructure for a given range of guest physical memory. + */ +struct kvm_userspace_memory_region * +kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start, + uint64_t end) +{ + struct userspace_mem_region *region; + + region = userspace_mem_region_find(vm, start, end); + if (!region) + return NULL; + + return ®ion->region; +} + +__weak void vcpu_arch_free(struct kvm_vcpu *vcpu) +{ + +} + +/* + * VM VCPU Remove + * + * Input Args: + * vcpu - VCPU to remove + * + * Output Args: None + * + * Return: None, TEST_ASSERT failures for all error conditions + * + * Removes a vCPU from a VM and frees its resources. 
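+ *
+ * Used by kvm_vm_release() below, which walks the VM's vcpu list and
+ * removes every vCPU before closing the VM and KVM file descriptors.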
+ */ +static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu) +{ + int ret; + + if (vcpu->dirty_gfns) { + ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); + TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); + vcpu->dirty_gfns = NULL; + } + + ret = munmap(vcpu->run, vcpu_mmap_sz()); + TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); + + ret = close(vcpu->fd); + TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); + + list_del(&vcpu->list); + + vcpu_arch_free(vcpu); + free(vcpu); +} + +void kvm_vm_release(struct kvm_vm *vmp) +{ + struct kvm_vcpu *vcpu, *tmp; + int ret; + + list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list) + vm_vcpu_rm(vmp, vcpu); + + ret = close(vmp->fd); + TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); + + ret = close(vmp->kvm_fd); + TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("close()", ret)); +} + +static void __vm_mem_region_delete(struct kvm_vm *vm, + struct userspace_mem_region *region, + bool unlink) +{ + int ret; + + if (unlink) { + rb_erase(®ion->gpa_node, &vm->regions.gpa_tree); + rb_erase(®ion->hva_node, &vm->regions.hva_tree); + hash_del(®ion->slot_node); + } + + region->region.memory_size = 0; + vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region); + + sparsebit_free(®ion->unused_phy_pages); + ret = munmap(region->mmap_start, region->mmap_size); + TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); + if (region->fd >= 0) { + /* There's an extra map when using shared memory. */ + ret = munmap(region->mmap_alias, region->mmap_size); + TEST_ASSERT(!ret, __KVM_SYSCALL_ERROR("munmap()", ret)); + close(region->fd); + } + + free(region); +} + +/* + * Destroys and frees the VM pointed to by vmp. + */ +void kvm_vm_free(struct kvm_vm *vmp) +{ + int ctr; + struct hlist_node *node; + struct userspace_mem_region *region; + + if (vmp == NULL) + return; + + /* Free cached stats metadata and close FD */ + if (vmp->stats_fd) { + free(vmp->stats_desc); + close(vmp->stats_fd); + } + + /* Free userspace_mem_regions. */ + hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node) + __vm_mem_region_delete(vmp, region, false); + + /* Free sparsebit arrays. */ + sparsebit_free(&vmp->vpages_valid); + sparsebit_free(&vmp->vpages_mapped); + + kvm_vm_release(vmp); + + /* Free the structure describing the VM. */ + free(vmp); +} + +int kvm_memfd_alloc(size_t size, bool hugepages) +{ + int memfd_flags = MFD_CLOEXEC; + int fd, r; + + if (hugepages) + memfd_flags |= MFD_HUGETLB; + + fd = memfd_create("kvm_selftest", memfd_flags); + TEST_ASSERT(fd != -1, __KVM_SYSCALL_ERROR("memfd_create()", fd)); + + r = ftruncate(fd, size); + TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("ftruncate()", r)); + + r = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, size); + TEST_ASSERT(!r, __KVM_SYSCALL_ERROR("fallocate()", r)); + + return fd; +} + +/* + * Memory Compare, host virtual to guest virtual + * + * Input Args: + * hva - Starting host virtual address + * vm - Virtual Machine + * gva - Starting guest virtual address + * len - number of bytes to compare + * + * Output Args: None + * + * Input/Output Args: None + * + * Return: + * Returns 0 if the bytes starting at hva for a length of len + * are equal the guest virtual bytes starting at gva. Returns + * a value < 0, if bytes at hva are less than those at gva. + * Otherwise a value > 0 is returned. + * + * Compares the bytes starting at the host virtual address hva, for + * a length of len, to the guest bytes starting at the guest virtual + * address given by gva. 
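+ *
+ * For example (host_buf, guest_va and len are illustrative names):
+ *
+ *	TEST_ASSERT(!kvm_memcmp_hva_gva(host_buf, vm, guest_va, len),
+ *		    "Host and guest copies differ");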
+ */ +int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len) +{ + size_t amt; + + /* + * Compare a batch of bytes until either a match is found + * or all the bytes have been compared. + */ + for (uintptr_t offset = 0; offset < len; offset += amt) { + uintptr_t ptr1 = (uintptr_t)hva + offset; + + /* + * Determine host address for guest virtual address + * at offset. + */ + uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset); + + /* + * Determine amount to compare on this pass. + * Don't allow the comparsion to cross a page boundary. + */ + amt = len - offset; + if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift)) + amt = vm->page_size - (ptr1 % vm->page_size); + if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift)) + amt = vm->page_size - (ptr2 % vm->page_size); + + assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift)); + assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift)); + + /* + * Perform the comparison. If there is a difference + * return that result to the caller, otherwise need + * to continue on looking for a mismatch. + */ + int ret = memcmp((void *)ptr1, (void *)ptr2, amt); + if (ret != 0) + return ret; + } + + /* + * No mismatch found. Let the caller know the two memory + * areas are equal. + */ + return 0; +} + +static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree, + struct userspace_mem_region *region) +{ + struct rb_node **cur, *parent; + + for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) { + struct userspace_mem_region *cregion; + + cregion = container_of(*cur, typeof(*cregion), gpa_node); + parent = *cur; + if (region->region.guest_phys_addr < + cregion->region.guest_phys_addr) + cur = &(*cur)->rb_left; + else { + TEST_ASSERT(region->region.guest_phys_addr != + cregion->region.guest_phys_addr, + "Duplicate GPA in region tree"); + + cur = &(*cur)->rb_right; + } + } + + rb_link_node(®ion->gpa_node, parent, cur); + rb_insert_color(®ion->gpa_node, gpa_tree); +} + +static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree, + struct userspace_mem_region *region) +{ + struct rb_node **cur, *parent; + + for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) { + struct userspace_mem_region *cregion; + + cregion = container_of(*cur, typeof(*cregion), hva_node); + parent = *cur; + if (region->host_mem < cregion->host_mem) + cur = &(*cur)->rb_left; + else { + TEST_ASSERT(region->host_mem != + cregion->host_mem, + "Duplicate HVA in region tree"); + + cur = &(*cur)->rb_right; + } + } + + rb_link_node(®ion->hva_node, parent, cur); + rb_insert_color(®ion->hva_node, hva_tree); +} + + +int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, + uint64_t gpa, uint64_t size, void *hva) +{ + struct kvm_userspace_memory_region region = { + .slot = slot, + .flags = flags, + .guest_phys_addr = gpa, + .memory_size = size, + .userspace_addr = (uintptr_t)hva, + }; + + return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion); +} + +void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, + uint64_t gpa, uint64_t size, void *hva) +{ + int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); + + TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed, errno = %d (%s)", + errno, strerror(errno)); +} + +/* + * VM Userspace Memory Region Add + * + * Input Args: + * vm - Virtual Machine + * src_type - Storage source for this region. + * NULL to use anonymous memory. 
+ * guest_paddr - Starting guest physical address + * slot - KVM region slot + * npages - Number of physical pages + * flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES) + * + * Output Args: None + * + * Return: None + * + * Allocates a memory area of the number of pages specified by npages + * and maps it to the VM specified by vm, at a starting physical address + * given by guest_paddr. The region is created with a KVM region slot + * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM. The + * region is created with the flags given by flags. + */ +void vm_userspace_mem_region_add(struct kvm_vm *vm, + enum vm_mem_backing_src_type src_type, + uint64_t guest_paddr, uint32_t slot, uint64_t npages, + uint32_t flags) +{ + int ret; + struct userspace_mem_region *region; + size_t backing_src_pagesz = get_backing_src_pagesz(src_type); + size_t alignment; + + TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, + "Number of guest pages is not compatible with the host. " + "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); + + TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " + "address not on a page boundary.\n" + " guest_paddr: 0x%lx vm->page_size: 0x%x", + guest_paddr, vm->page_size); + TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) + <= vm->max_gfn, "Physical range beyond maximum " + "supported physical address,\n" + " guest_paddr: 0x%lx npages: 0x%lx\n" + " vm->max_gfn: 0x%lx vm->page_size: 0x%x", + guest_paddr, npages, vm->max_gfn, vm->page_size); + + /* + * Confirm a mem region with an overlapping address doesn't + * already exist. + */ + region = (struct userspace_mem_region *) userspace_mem_region_find( + vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); + if (region != NULL) + TEST_FAIL("overlapping userspace_mem_region already " + "exists\n" + " requested guest_paddr: 0x%lx npages: 0x%lx " + "page_size: 0x%x\n" + " existing guest_paddr: 0x%lx size: 0x%lx", + guest_paddr, npages, vm->page_size, + (uint64_t) region->region.guest_phys_addr, + (uint64_t) region->region.memory_size); + + /* Confirm no region with the requested slot already exists. */ + hash_for_each_possible(vm->regions.slot_hash, region, slot_node, + slot) { + if (region->region.slot != slot) + continue; + + TEST_FAIL("A mem region with the requested slot " + "already exists.\n" + " requested slot: %u paddr: 0x%lx npages: 0x%lx\n" + " existing slot: %u paddr: 0x%lx size: 0x%lx", + slot, guest_paddr, npages, + region->region.slot, + (uint64_t) region->region.guest_phys_addr, + (uint64_t) region->region.memory_size); + } + + /* Allocate and initialize new mem region structure. */ + region = calloc(1, sizeof(*region)); + TEST_ASSERT(region != NULL, "Insufficient Memory"); + region->mmap_size = npages * vm->page_size; + +#ifdef __s390x__ + /* On s390x, the host address must be aligned to 1M (due to PGSTEs) */ + alignment = 0x100000; +#else + alignment = 1; +#endif + + /* + * When using THP mmap is not guaranteed to returned a hugepage aligned + * address so we have to pad the mmap. Padding is not needed for HugeTLB + * because mmap will always return an address aligned to the HugeTLB + * page size. 
+ */ + if (src_type == VM_MEM_SRC_ANONYMOUS_THP) + alignment = max(backing_src_pagesz, alignment); + + TEST_ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz)); + + /* Add enough memory to align up if necessary */ + if (alignment > 1) + region->mmap_size += alignment; + + region->fd = -1; + if (backing_src_is_shared(src_type)) + region->fd = kvm_memfd_alloc(region->mmap_size, + src_type == VM_MEM_SRC_SHARED_HUGETLB); + + region->mmap_start = mmap(NULL, region->mmap_size, + PROT_READ | PROT_WRITE, + vm_mem_backing_src_alias(src_type)->flag, + region->fd, 0); + TEST_ASSERT(region->mmap_start != MAP_FAILED, + __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED)); + + TEST_ASSERT(!is_backing_src_hugetlb(src_type) || + region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz), + "mmap_start %p is not aligned to HugeTLB page size 0x%lx", + region->mmap_start, backing_src_pagesz); + + /* Align host address */ + region->host_mem = align_ptr_up(region->mmap_start, alignment); + + /* As needed perform madvise */ + if ((src_type == VM_MEM_SRC_ANONYMOUS || + src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) { + ret = madvise(region->host_mem, npages * vm->page_size, + src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE); + TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s", + region->host_mem, npages * vm->page_size, + vm_mem_backing_src_alias(src_type)->name); + } + + region->backing_src_type = src_type; + region->unused_phy_pages = sparsebit_alloc(); + sparsebit_set_num(region->unused_phy_pages, + guest_paddr >> vm->page_shift, npages); + region->region.slot = slot; + region->region.flags = flags; + region->region.guest_phys_addr = guest_paddr; + region->region.memory_size = npages * vm->page_size; + region->region.userspace_addr = (uintptr_t) region->host_mem; + ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region); + TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n" + " rc: %i errno: %i\n" + " slot: %u flags: 0x%x\n" + " guest_phys_addr: 0x%lx size: 0x%lx", + ret, errno, slot, flags, + guest_paddr, (uint64_t) region->region.memory_size); + + /* Add to quick lookup data structures */ + vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); + vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); + hash_add(vm->regions.slot_hash, ®ion->slot_node, slot); + + /* If shared memory, create an alias. */ + if (region->fd >= 0) { + region->mmap_alias = mmap(NULL, region->mmap_size, + PROT_READ | PROT_WRITE, + vm_mem_backing_src_alias(src_type)->flag, + region->fd, 0); + TEST_ASSERT(region->mmap_alias != MAP_FAILED, + __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED)); + + /* Align host alias address */ + region->host_alias = align_ptr_up(region->mmap_alias, alignment); + } +} + +/* + * Memslot to region + * + * Input Args: + * vm - Virtual Machine + * memslot - KVM memory slot ID + * + * Output Args: None + * + * Return: + * Pointer to memory region structure that describe memory region + * using kvm memory slot ID given by memslot. TEST_ASSERT failure + * on error (e.g. currently no memory region using memslot as a KVM + * memory slot ID). 
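+ *
+ * For example, __vm_create() above uses this to look up the library's
+ * slot 0 region before initializing ucall:
+ *
+ *	slot0 = memslot2region(vm, 0);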
+ */ +struct userspace_mem_region * +memslot2region(struct kvm_vm *vm, uint32_t memslot) +{ + struct userspace_mem_region *region; + + hash_for_each_possible(vm->regions.slot_hash, region, slot_node, + memslot) + if (region->region.slot == memslot) + return region; + + fprintf(stderr, "No mem region with the requested slot found,\n" + " requested slot: %u\n", memslot); + fputs("---- vm dump ----\n", stderr); + vm_dump(stderr, vm, 2); + TEST_FAIL("Mem region not found"); + return NULL; +} + +/* + * VM Memory Region Flags Set + * + * Input Args: + * vm - Virtual Machine + * flags - Starting guest physical address + * + * Output Args: None + * + * Return: None + * + * Sets the flags of the memory region specified by the value of slot, + * to the values given by flags. + */ +void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) +{ + int ret; + struct userspace_mem_region *region; + + region = memslot2region(vm, slot); + + region->region.flags = flags; + + ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region); + + TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n" + " rc: %i errno: %i slot: %u flags: 0x%x", + ret, errno, slot, flags); +} + +/* + * VM Memory Region Move + * + * Input Args: + * vm - Virtual Machine + * slot - Slot of the memory region to move + * new_gpa - Starting guest physical address + * + * Output Args: None + * + * Return: None + * + * Change the gpa of a memory region. + */ +void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) +{ + struct userspace_mem_region *region; + int ret; + + region = memslot2region(vm, slot); + + region->region.guest_phys_addr = new_gpa; + + ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION, ®ion->region); + + TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n" + "ret: %i errno: %i slot: %u new_gpa: 0x%lx", + ret, errno, slot, new_gpa); +} + +/* + * VM Memory Region Delete + * + * Input Args: + * vm - Virtual Machine + * slot - Slot of the memory region to delete + * + * Output Args: None + * + * Return: None + * + * Delete a memory region. + */ +void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) +{ + __vm_mem_region_delete(vm, memslot2region(vm, slot), true); +} + +/* Returns the size of a vCPU's kvm_run structure. */ +static int vcpu_mmap_sz(void) +{ + int dev_fd, ret; + + dev_fd = open_kvm_dev_path_or_exit(); + + ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL); + TEST_ASSERT(ret >= sizeof(struct kvm_run), + KVM_IOCTL_ERROR(KVM_GET_VCPU_MMAP_SIZE, ret)); + + close(dev_fd); + + return ret; +} + +static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) +{ + struct kvm_vcpu *vcpu; + + list_for_each_entry(vcpu, &vm->vcpus, list) { + if (vcpu->id == vcpu_id) + return true; + } + + return false; +} + +/* + * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id. + * No additional vCPU setup is done. Returns the vCPU. + */ +struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) +{ + struct kvm_vcpu *vcpu; + + /* Confirm a vcpu with the specified id doesn't already exist. */ + TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists\n", vcpu_id); + + /* Allocate and initialize new vcpu structure. 
*/ + vcpu = calloc(1, sizeof(*vcpu)); + TEST_ASSERT(vcpu != NULL, "Insufficient Memory"); + + vcpu->vm = vm; + vcpu->id = vcpu_id; + vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); + TEST_ASSERT(vcpu->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VCPU, vcpu->fd)); + + TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->run), "vcpu mmap size " + "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi", + vcpu_mmap_sz(), sizeof(*vcpu->run)); + vcpu->run = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(), + PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0); + TEST_ASSERT(vcpu->run != MAP_FAILED, + __KVM_SYSCALL_ERROR("mmap()", (int)(unsigned long)MAP_FAILED)); + + /* Add to linked-list of VCPUs. */ + list_add(&vcpu->list, &vm->vcpus); + + return vcpu; +} + +/* + * VM Virtual Address Unused Gap + * + * Input Args: + * vm - Virtual Machine + * sz - Size (bytes) + * vaddr_min - Minimum Virtual Address + * + * Output Args: None + * + * Return: + * Lowest virtual address at or below vaddr_min, with at least + * sz unused bytes. TEST_ASSERT failure if no area of at least + * size sz is available. + * + * Within the VM specified by vm, locates the lowest starting virtual + * address >= vaddr_min, that has at least sz unallocated bytes. A + * TEST_ASSERT failure occurs for invalid input or no area of at least + * sz unallocated bytes >= vaddr_min is available. + */ +vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, + vm_vaddr_t vaddr_min) +{ + uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; + + /* Determine lowest permitted virtual page index. */ + uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; + if ((pgidx_start * vm->page_size) < vaddr_min) + goto no_va_found; + + /* Loop over section with enough valid virtual page indexes. */ + if (!sparsebit_is_set_num(vm->vpages_valid, + pgidx_start, pages)) + pgidx_start = sparsebit_next_set_num(vm->vpages_valid, + pgidx_start, pages); + do { + /* + * Are there enough unused virtual pages available at + * the currently proposed starting virtual page index. + * If not, adjust proposed starting index to next + * possible. + */ + if (sparsebit_is_clear_num(vm->vpages_mapped, + pgidx_start, pages)) + goto va_found; + pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, + pgidx_start, pages); + if (pgidx_start == 0) + goto no_va_found; + + /* + * If needed, adjust proposed starting virtual address, + * to next range of valid virtual addresses. 
+ */ + if (!sparsebit_is_set_num(vm->vpages_valid, + pgidx_start, pages)) { + pgidx_start = sparsebit_next_set_num( + vm->vpages_valid, pgidx_start, pages); + if (pgidx_start == 0) + goto no_va_found; + } + } while (pgidx_start != 0); + +no_va_found: + TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages); + + /* NOT REACHED */ + return -1; + +va_found: + TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, + pgidx_start, pages), + "Unexpected, invalid virtual page index range,\n" + " pgidx_start: 0x%lx\n" + " pages: 0x%lx", + pgidx_start, pages); + TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, + pgidx_start, pages), + "Unexpected, pages already mapped,\n" + " pgidx_start: 0x%lx\n" + " pages: 0x%lx", + pgidx_start, pages); + + return pgidx_start * vm->page_size; +} + +vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, + enum kvm_mem_region_type type) +{ + uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); + + virt_pgd_alloc(vm); + vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages, + KVM_UTIL_MIN_PFN * vm->page_size, + vm->memslots[type]); + + /* + * Find an unused range of virtual page addresses of at least + * pages in length. + */ + vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); + + /* Map the virtual pages. */ + for (vm_vaddr_t vaddr = vaddr_start; pages > 0; + pages--, vaddr += vm->page_size, paddr += vm->page_size) { + + virt_pg_map(vm, vaddr, paddr); + + sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); + } + + return vaddr_start; +} + +/* + * VM Virtual Address Allocate + * + * Input Args: + * vm - Virtual Machine + * sz - Size in bytes + * vaddr_min - Minimum starting virtual address + * + * Output Args: None + * + * Return: + * Starting guest virtual address + * + * Allocates at least sz bytes within the virtual address space of the vm + * given by vm. The allocated bytes are mapped to a virtual address >= + * the address given by vaddr_min. Note that each allocation uses a + * a unique set of pages, with the minimum real allocation being at least + * a page. The allocated physical space comes from the TEST_DATA memory region. + */ +vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) +{ + return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); +} + +/* + * VM Virtual Address Allocate Pages + * + * Input Args: + * vm - Virtual Machine + * + * Output Args: None + * + * Return: + * Starting guest virtual address + * + * Allocates at least N system pages worth of bytes within the virtual address + * space of the vm. + */ +vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) +{ + return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); +} + +vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) +{ + return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); +} + +/* + * VM Virtual Address Allocate Page + * + * Input Args: + * vm - Virtual Machine + * + * Output Args: None + * + * Return: + * Starting guest virtual address + * + * Allocates at least one system page worth of bytes within the virtual address + * space of the vm. 
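+ *
+ * For example, to grab a page of guest scratch memory and zero it from
+ * the host (scratch_gva is an illustrative name):
+ *
+ *	vm_vaddr_t scratch_gva = vm_vaddr_alloc_page(vm);
+ *
+ *	memset(addr_gva2hva(vm, scratch_gva), 0, vm->page_size);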
+ */ +vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) +{ + return vm_vaddr_alloc_pages(vm, 1); +} + +/* + * Map a range of VM virtual address to the VM's physical address + * + * Input Args: + * vm - Virtual Machine + * vaddr - Virtuall address to map + * paddr - VM Physical Address + * npages - The number of pages to map + * + * Output Args: None + * + * Return: None + * + * Within the VM given by @vm, creates a virtual translation for + * @npages starting at @vaddr to the page range starting at @paddr. + */ +void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + unsigned int npages) +{ + size_t page_size = vm->page_size; + size_t size = npages * page_size; + + TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow"); + TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); + + while (npages--) { + virt_pg_map(vm, vaddr, paddr); + sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); + + vaddr += page_size; + paddr += page_size; + } +} + +/* + * Address VM Physical to Host Virtual + * + * Input Args: + * vm - Virtual Machine + * gpa - VM physical address + * + * Output Args: None + * + * Return: + * Equivalent host virtual address + * + * Locates the memory region containing the VM physical address given + * by gpa, within the VM given by vm. When found, the host virtual + * address providing the memory to the vm physical address is returned. + * A TEST_ASSERT failure occurs if no region containing gpa exists. + */ +void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) +{ + struct userspace_mem_region *region; + + region = userspace_mem_region_find(vm, gpa, gpa); + if (!region) { + TEST_FAIL("No vm physical memory at 0x%lx", gpa); + return NULL; + } + + return (void *)((uintptr_t)region->host_mem + + (gpa - region->region.guest_phys_addr)); +} + +/* + * Address Host Virtual to VM Physical + * + * Input Args: + * vm - Virtual Machine + * hva - Host virtual address + * + * Output Args: None + * + * Return: + * Equivalent VM physical address + * + * Locates the memory region containing the host virtual address given + * by hva, within the VM given by vm. When found, the equivalent + * VM physical address is returned. A TEST_ASSERT failure occurs if no + * region containing hva exists. + */ +vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) +{ + struct rb_node *node; + + for (node = vm->regions.hva_tree.rb_node; node; ) { + struct userspace_mem_region *region = + container_of(node, struct userspace_mem_region, hva_node); + + if (hva >= region->host_mem) { + if (hva <= (region->host_mem + + region->region.memory_size - 1)) + return (vm_paddr_t)((uintptr_t) + region->region.guest_phys_addr + + (hva - (uintptr_t)region->host_mem)); + + node = node->rb_right; + } else + node = node->rb_left; + } + + TEST_FAIL("No mapping to a guest physical address, hva: %p", hva); + return -1; +} + +/* + * Address VM physical to Host Virtual *alias*. + * + * Input Args: + * vm - Virtual Machine + * gpa - VM physical address + * + * Output Args: None + * + * Return: + * Equivalent address within the host virtual *alias* area, or NULL + * (without failing the test) if the guest memory is not shared (so + * no alias exists). + * + * Create a writable, shared virtual=>physical alias for the specific GPA. + * The primary use case is to allow the host selftest to manipulate guest + * memory without mapping said memory in the guest's address space. And, for + * userfaultfd-based demand paging, to do so without triggering userfaults. 
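+ *
+ * For example, to write guest memory from the host without going through
+ * the guest mapping (gpa and len are illustrative names):
+ *
+ *	void *alias = addr_gpa2alias(vm, gpa);
+ *
+ *	if (alias)
+ *		memset(alias, 0xaa, len);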
+ */ +void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) +{ + struct userspace_mem_region *region; + uintptr_t offset; + + region = userspace_mem_region_find(vm, gpa, gpa); + if (!region) + return NULL; + + if (!region->host_alias) + return NULL; + + offset = gpa - region->region.guest_phys_addr; + return (void *) ((uintptr_t) region->host_alias + offset); +} + +/* Create an interrupt controller chip for the specified VM. */ +void vm_create_irqchip(struct kvm_vm *vm) +{ + vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL); + + vm->has_irqchip = true; +} + +int _vcpu_run(struct kvm_vcpu *vcpu) +{ + int rc; + + do { + rc = __vcpu_run(vcpu); + } while (rc == -1 && errno == EINTR); + + assert_on_unhandled_exception(vcpu); + + return rc; +} + +/* + * Invoke KVM_RUN on a vCPU until KVM returns something other than -EINTR. + * Assert if the KVM returns an error (other than -EINTR). + */ +void vcpu_run(struct kvm_vcpu *vcpu) +{ + int ret = _vcpu_run(vcpu); + + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_RUN, ret)); +} + +void vcpu_run_complete_io(struct kvm_vcpu *vcpu) +{ + int ret; + + vcpu->run->immediate_exit = 1; + ret = __vcpu_run(vcpu); + vcpu->run->immediate_exit = 0; + + TEST_ASSERT(ret == -1 && errno == EINTR, + "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i", + ret, errno); +} + +/* + * Get the list of guest registers which are supported for + * KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. Returns a kvm_reg_list pointer, + * it is the caller's responsibility to free the list. + */ +struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vcpu *vcpu) +{ + struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list; + int ret; + + ret = __vcpu_ioctl(vcpu, KVM_GET_REG_LIST, ®_list_n); + TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0"); + + reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64)); + reg_list->n = reg_list_n.n; + vcpu_ioctl(vcpu, KVM_GET_REG_LIST, reg_list); + return reg_list; +} + +void *vcpu_map_dirty_ring(struct kvm_vcpu *vcpu) +{ + uint32_t page_size = getpagesize(); + uint32_t size = vcpu->vm->dirty_ring_size; + + TEST_ASSERT(size > 0, "Should enable dirty ring first"); + + if (!vcpu->dirty_gfns) { + void *addr; + + addr = mmap(NULL, size, PROT_READ, MAP_PRIVATE, vcpu->fd, + page_size * KVM_DIRTY_LOG_PAGE_OFFSET); + TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private"); + + addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE, vcpu->fd, + page_size * KVM_DIRTY_LOG_PAGE_OFFSET); + TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec"); + + addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, + page_size * KVM_DIRTY_LOG_PAGE_OFFSET); + TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed"); + + vcpu->dirty_gfns = addr; + vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn); + } + + return vcpu->dirty_gfns; +} + +/* + * Device Ioctl + */ + +int __kvm_has_device_attr(int dev_fd, uint32_t group, uint64_t attr) +{ + struct kvm_device_attr attribute = { + .group = group, + .attr = attr, + .flags = 0, + }; + + return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute); +} + +int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) +{ + struct kvm_create_device create_dev = { + .type = type, + .flags = KVM_CREATE_DEVICE_TEST, + }; + + return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); +} + +int __kvm_create_device(struct kvm_vm *vm, uint64_t type) +{ + struct kvm_create_device create_dev = { + .type = type, + .fd = -1, + .flags = 0, + }; + int err; + + err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); + 
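+	/*
+	 * On success, KVM_CREATE_DEVICE returns 0 and stores the new
+	 * device's file descriptor in create_dev.fd, hence the
+	 * "err ? : create_dev.fd" below; on failure the ioctl returns -1.
+	 */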
TEST_ASSERT(err <= 0, "KVM_CREATE_DEVICE shouldn't return a positive value"); + return err ? : create_dev.fd; +} + +int __kvm_device_attr_get(int dev_fd, uint32_t group, uint64_t attr, void *val) +{ + struct kvm_device_attr kvmattr = { + .group = group, + .attr = attr, + .flags = 0, + .addr = (uintptr_t)val, + }; + + return __kvm_ioctl(dev_fd, KVM_GET_DEVICE_ATTR, &kvmattr); +} + +int __kvm_device_attr_set(int dev_fd, uint32_t group, uint64_t attr, void *val) +{ + struct kvm_device_attr kvmattr = { + .group = group, + .attr = attr, + .flags = 0, + .addr = (uintptr_t)val, + }; + + return __kvm_ioctl(dev_fd, KVM_SET_DEVICE_ATTR, &kvmattr); +} + +/* + * IRQ related functions. + */ + +int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) +{ + struct kvm_irq_level irq_level = { + .irq = irq, + .level = level, + }; + + return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); +} + +void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) +{ + int ret = _kvm_irq_line(vm, irq, level); + + TEST_ASSERT(ret >= 0, KVM_IOCTL_ERROR(KVM_IRQ_LINE, ret)); +} + +struct kvm_irq_routing *kvm_gsi_routing_create(void) +{ + struct kvm_irq_routing *routing; + size_t size; + + size = sizeof(struct kvm_irq_routing); + /* Allocate space for the max number of entries: this wastes 196 KBs. */ + size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry); + routing = calloc(1, size); + assert(routing); + + return routing; +} + +void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing, + uint32_t gsi, uint32_t pin) +{ + int i; + + assert(routing); + assert(routing->nr < KVM_MAX_IRQ_ROUTES); + + i = routing->nr; + routing->entries[i].gsi = gsi; + routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP; + routing->entries[i].flags = 0; + routing->entries[i].u.irqchip.irqchip = 0; + routing->entries[i].u.irqchip.pin = pin; + routing->nr++; +} + +int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) +{ + int ret; + + assert(routing); + ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing); + free(routing); + + return ret; +} + +void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) +{ + int ret; + + ret = _kvm_gsi_routing_write(vm, routing); + TEST_ASSERT(!ret, KVM_IOCTL_ERROR(KVM_SET_GSI_ROUTING, ret)); +} + +/* + * VM Dump + * + * Input Args: + * vm - Virtual Machine + * indent - Left margin indent amount + * + * Output Args: + * stream - Output FILE stream + * + * Return: None + * + * Dumps the current state of the VM given by vm, to the FILE stream + * given by stream. 
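+ *
+ * For example, the allocation-failure paths in this file dump the VM to
+ * stderr with a two space indent:
+ *
+ *	vm_dump(stderr, vm, 2);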
+ */ +void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +{ + int ctr; + struct userspace_mem_region *region; + struct kvm_vcpu *vcpu; + + fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); + fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); + fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); + fprintf(stream, "%*sMem Regions:\n", indent, ""); + hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { + fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx " + "host_virt: %p\n", indent + 2, "", + (uint64_t) region->region.guest_phys_addr, + (uint64_t) region->region.memory_size, + region->host_mem); + fprintf(stream, "%*sunused_phy_pages: ", indent + 2, ""); + sparsebit_dump(stream, region->unused_phy_pages, 0); + } + fprintf(stream, "%*sMapped Virtual Pages:\n", indent, ""); + sparsebit_dump(stream, vm->vpages_mapped, indent + 2); + fprintf(stream, "%*spgd_created: %u\n", indent, "", + vm->pgd_created); + if (vm->pgd_created) { + fprintf(stream, "%*sVirtual Translation Tables:\n", + indent + 2, ""); + virt_dump(stream, vm, indent + 4); + } + fprintf(stream, "%*sVCPUs:\n", indent, ""); + + list_for_each_entry(vcpu, &vm->vcpus, list) + vcpu_dump(stream, vcpu, indent + 2); +} + +#define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x} + +/* Known KVM exit reasons */ +static struct exit_reason { + unsigned int reason; + const char *name; +} exit_reasons_known[] = { + KVM_EXIT_STRING(UNKNOWN), + KVM_EXIT_STRING(EXCEPTION), + KVM_EXIT_STRING(IO), + KVM_EXIT_STRING(HYPERCALL), + KVM_EXIT_STRING(DEBUG), + KVM_EXIT_STRING(HLT), + KVM_EXIT_STRING(MMIO), + KVM_EXIT_STRING(IRQ_WINDOW_OPEN), + KVM_EXIT_STRING(SHUTDOWN), + KVM_EXIT_STRING(FAIL_ENTRY), + KVM_EXIT_STRING(INTR), + KVM_EXIT_STRING(SET_TPR), + KVM_EXIT_STRING(TPR_ACCESS), + KVM_EXIT_STRING(S390_SIEIC), + KVM_EXIT_STRING(S390_RESET), + KVM_EXIT_STRING(DCR), + KVM_EXIT_STRING(NMI), + KVM_EXIT_STRING(INTERNAL_ERROR), + KVM_EXIT_STRING(OSI), + KVM_EXIT_STRING(PAPR_HCALL), + KVM_EXIT_STRING(S390_UCONTROL), + KVM_EXIT_STRING(WATCHDOG), + KVM_EXIT_STRING(S390_TSCH), + KVM_EXIT_STRING(EPR), + KVM_EXIT_STRING(SYSTEM_EVENT), + KVM_EXIT_STRING(S390_STSI), + KVM_EXIT_STRING(IOAPIC_EOI), + KVM_EXIT_STRING(HYPERV), + KVM_EXIT_STRING(ARM_NISV), + KVM_EXIT_STRING(X86_RDMSR), + KVM_EXIT_STRING(X86_WRMSR), + KVM_EXIT_STRING(DIRTY_RING_FULL), + KVM_EXIT_STRING(AP_RESET_HOLD), + KVM_EXIT_STRING(X86_BUS_LOCK), + KVM_EXIT_STRING(XEN), + KVM_EXIT_STRING(RISCV_SBI), + KVM_EXIT_STRING(RISCV_CSR), + KVM_EXIT_STRING(NOTIFY), +#ifdef KVM_EXIT_MEMORY_NOT_PRESENT + KVM_EXIT_STRING(MEMORY_NOT_PRESENT), +#endif +}; + +/* + * Exit Reason String + * + * Input Args: + * exit_reason - Exit reason + * + * Output Args: None + * + * Return: + * Constant string pointer describing the exit reason. + * + * Locates and returns a constant string that describes the KVM exit + * reason given by exit_reason. If no such string is found, a constant + * string of "Unknown" is returned. 
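 *
 * Illustrative sketch of the usual consumer: after vcpu_run(), a test that
 * expects an I/O exit might report failures as
 *
 *	TEST_ASSERT(vcpu->run->exit_reason == KVM_EXIT_IO,
 *		    "Unexpected exit: %u (%s)",
 *		    vcpu->run->exit_reason,
 *		    exit_reason_str(vcpu->run->exit_reason));
 *
 * (the TEST_ASSERT_KVM_EXIT_REASON() macro used elsewhere in the selftests
 * wraps this same pattern).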
+ */ +const char *exit_reason_str(unsigned int exit_reason) +{ + unsigned int n1; + + for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) { + if (exit_reason == exit_reasons_known[n1].reason) + return exit_reasons_known[n1].name; + } + + return "Unknown"; +} + +/* + * Physical Contiguous Page Allocator + * + * Input Args: + * vm - Virtual Machine + * num - number of pages + * paddr_min - Physical address minimum + * memslot - Memory region to allocate page from + * + * Output Args: None + * + * Return: + * Starting physical address + * + * Within the VM specified by vm, locates a range of available physical + * pages at or above paddr_min. If found, the pages are marked as in use + * and their base address is returned. A TEST_ASSERT failure occurs if + * not enough pages are available at or above paddr_min. + */ +vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, + vm_paddr_t paddr_min, uint32_t memslot) +{ + struct userspace_mem_region *region; + sparsebit_idx_t pg, base; + + TEST_ASSERT(num > 0, "Must allocate at least one page"); + + TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " + "not divisible by page size.\n" + " paddr_min: 0x%lx page_size: 0x%x", + paddr_min, vm->page_size); + + region = memslot2region(vm, memslot); + base = pg = paddr_min >> vm->page_shift; + + do { + for (; pg < base + num; ++pg) { + if (!sparsebit_is_set(region->unused_phy_pages, pg)) { + base = pg = sparsebit_next_set(region->unused_phy_pages, pg); + break; + } + } + } while (pg && pg != base + num); + + if (pg == 0) { + fprintf(stderr, "No guest physical page available, " + "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n", + paddr_min, vm->page_size, memslot); + fputs("---- vm dump ----\n", stderr); + vm_dump(stderr, vm, 2); + abort(); + } + + for (pg = base; pg < base + num; ++pg) + sparsebit_clear(region->unused_phy_pages, pg); + + return base * vm->page_size; +} + +vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, + uint32_t memslot) +{ + return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); +} + +vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) +{ + return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, + vm->memslots[MEM_REGION_PT]); +} + +/* + * Address Guest Virtual to Host Virtual + * + * Input Args: + * vm - Virtual Machine + * gva - VM virtual address + * + * Output Args: None + * + * Return: + * Equivalent host virtual address + */ +void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) +{ + return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); +} + +unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm) +{ + return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; +} + +static unsigned int vm_calc_num_pages(unsigned int num_pages, + unsigned int page_shift, + unsigned int new_page_shift, + bool ceil) +{ + unsigned int n = 1 << (new_page_shift - page_shift); + + if (page_shift >= new_page_shift) + return num_pages * (1 << (page_shift - new_page_shift)); + + return num_pages / n + !!(ceil && num_pages % n); +} + +static inline int getpageshift(void) +{ + return __builtin_ffs(getpagesize()) - 1; +} + +unsigned int +vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages) +{ + return vm_calc_num_pages(num_guest_pages, + vm_guest_mode_params[mode].page_shift, + getpageshift(), true); +} + +unsigned int +vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages) +{ + return vm_calc_num_pages(num_host_pages, getpageshift(), + vm_guest_mode_params[mode].page_shift, false); +} + +unsigned int vm_calc_num_guest_pages(enum 
vm_guest_mode mode, size_t size) +{ + unsigned int n; + n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size); + return vm_adjust_num_guest_pages(mode, n); +} + +/* + * Read binary stats descriptors + * + * Input Args: + * stats_fd - the file descriptor for the binary stats file from which to read + * header - the binary stats metadata header corresponding to the given FD + * + * Output Args: None + * + * Return: + * A pointer to a newly allocated series of stat descriptors. + * Caller is responsible for freeing the returned kvm_stats_desc. + * + * Read the stats descriptors from the binary stats interface. + */ +struct kvm_stats_desc *read_stats_descriptors(int stats_fd, + struct kvm_stats_header *header) +{ + struct kvm_stats_desc *stats_desc; + ssize_t desc_size, total_size, ret; + + desc_size = get_stats_descriptor_size(header); + total_size = header->num_desc * desc_size; + + stats_desc = calloc(header->num_desc, desc_size); + TEST_ASSERT(stats_desc, "Allocate memory for stats descriptors"); + + ret = pread(stats_fd, stats_desc, total_size, header->desc_offset); + TEST_ASSERT(ret == total_size, "Read KVM stats descriptors"); + + return stats_desc; +} + +/* + * Read stat data for a particular stat + * + * Input Args: + * stats_fd - the file descriptor for the binary stats file from which to read + * header - the binary stats metadata header corresponding to the given FD + * desc - the binary stat metadata for the particular stat to be read + * max_elements - the maximum number of 8-byte values to read into data + * + * Output Args: + * data - the buffer into which stat data should be read + * + * Read the data values of a specified stat from the binary stats interface. + */ +void read_stat_data(int stats_fd, struct kvm_stats_header *header, + struct kvm_stats_desc *desc, uint64_t *data, + size_t max_elements) +{ + size_t nr_elements = min_t(ssize_t, desc->size, max_elements); + size_t size = nr_elements * sizeof(*data); + ssize_t ret; + + TEST_ASSERT(desc->size, "No elements in stat '%s'", desc->name); + TEST_ASSERT(max_elements, "Zero elements requested for stat '%s'", desc->name); + + ret = pread(stats_fd, data, size, + header->data_offset + desc->offset); + + TEST_ASSERT(ret >= 0, "pread() failed on stat '%s', errno: %i (%s)", + desc->name, errno, strerror(errno)); + TEST_ASSERT(ret == size, + "pread() on stat '%s' read %ld bytes, wanted %lu bytes", + desc->name, size, ret); +} + +/* + * Read the data of the named stat + * + * Input Args: + * vm - the VM for which the stat should be read + * stat_name - the name of the stat to read + * max_elements - the maximum number of 8-byte values to read into data + * + * Output Args: + * data - the buffer into which stat data should be read + * + * Read the data values of a specified stat from the binary stats interface. 
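 *
 * A minimal usage sketch for the helper defined below; the stat name is only
 * an example of a name exposed by KVM's binary stats interface:
 *
 *	uint64_t val;
 *
 *	__vm_get_stat(vm, "remote_tlb_flush", &val, 1);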
+ */ +void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data, + size_t max_elements) +{ + struct kvm_stats_desc *desc; + size_t size_desc; + int i; + + if (!vm->stats_fd) { + vm->stats_fd = vm_get_stats_fd(vm); + read_stats_header(vm->stats_fd, &vm->stats_header); + vm->stats_desc = read_stats_descriptors(vm->stats_fd, + &vm->stats_header); + } + + size_desc = get_stats_descriptor_size(&vm->stats_header); + + for (i = 0; i < vm->stats_header.num_desc; ++i) { + desc = (void *)vm->stats_desc + (i * size_desc); + + if (strcmp(desc->name, stat_name)) + continue; + + read_stat_data(vm->stats_fd, &vm->stats_header, desc, + data, max_elements); + + break; + } +} + +__weak void kvm_arch_vm_post_create(struct kvm_vm *vm) +{ +} + +__weak void kvm_selftest_arch_init(void) +{ +} + +void __attribute((constructor)) kvm_selftest_init(void) +{ + /* Tell stdout not to buffer its content. */ + setbuf(stdout, NULL); + + kvm_selftest_arch_init(); +} diff --git a/tools/testing/selftests/kvm/lib/memstress.c b/tools/testing/selftests/kvm/lib/memstress.c new file mode 100644 index 0000000000..df457452d1 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/memstress.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020, Google LLC. + */ +#define _GNU_SOURCE + +#include <inttypes.h> +#include <linux/bitmap.h> + +#include "kvm_util.h" +#include "memstress.h" +#include "processor.h" + +struct memstress_args memstress_args; + +/* + * Guest virtual memory offset of the testing memory slot. + * Must not conflict with identity mapped test code. + */ +static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM; + +struct vcpu_thread { + /* The index of the vCPU. */ + int vcpu_idx; + + /* The pthread backing the vCPU. */ + pthread_t thread; + + /* Set to true once the vCPU thread is up and running. */ + bool running; +}; + +/* The vCPU threads involved in this test. */ +static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS]; + +/* The function run by each vCPU thread, as provided by the test. */ +static void (*vcpu_thread_fn)(struct memstress_vcpu_args *); + +/* Set to true once all vCPU threads are up and running. */ +static bool all_vcpu_threads_running; + +static struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; + +/* + * Continuously write to the first 8 bytes of each page in the + * specified region. + */ +void memstress_guest_code(uint32_t vcpu_idx) +{ + struct memstress_args *args = &memstress_args; + struct memstress_vcpu_args *vcpu_args = &args->vcpu_args[vcpu_idx]; + struct guest_random_state rand_state; + uint64_t gva; + uint64_t pages; + uint64_t addr; + uint64_t page; + int i; + + rand_state = new_guest_random_state(args->random_seed + vcpu_idx); + + gva = vcpu_args->gva; + pages = vcpu_args->pages; + + /* Make sure vCPU args data structure is not corrupt. 
*/ + GUEST_ASSERT(vcpu_args->vcpu_idx == vcpu_idx); + + while (true) { + for (i = 0; i < sizeof(memstress_args); i += args->guest_page_size) + (void) *((volatile char *)args + i); + + for (i = 0; i < pages; i++) { + if (args->random_access) + page = guest_random_u32(&rand_state) % pages; + else + page = i; + + addr = gva + (page * args->guest_page_size); + + if (guest_random_u32(&rand_state) % 100 < args->write_percent) + *(uint64_t *)addr = 0x0123456789ABCDEF; + else + READ_ONCE(*(uint64_t *)addr); + } + + GUEST_SYNC(1); + } +} + +void memstress_setup_vcpus(struct kvm_vm *vm, int nr_vcpus, + struct kvm_vcpu *vcpus[], + uint64_t vcpu_memory_bytes, + bool partition_vcpu_memory_access) +{ + struct memstress_args *args = &memstress_args; + struct memstress_vcpu_args *vcpu_args; + int i; + + for (i = 0; i < nr_vcpus; i++) { + vcpu_args = &args->vcpu_args[i]; + + vcpu_args->vcpu = vcpus[i]; + vcpu_args->vcpu_idx = i; + + if (partition_vcpu_memory_access) { + vcpu_args->gva = guest_test_virt_mem + + (i * vcpu_memory_bytes); + vcpu_args->pages = vcpu_memory_bytes / + args->guest_page_size; + vcpu_args->gpa = args->gpa + (i * vcpu_memory_bytes); + } else { + vcpu_args->gva = guest_test_virt_mem; + vcpu_args->pages = (nr_vcpus * vcpu_memory_bytes) / + args->guest_page_size; + vcpu_args->gpa = args->gpa; + } + + vcpu_args_set(vcpus[i], 1, i); + + pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n", + i, vcpu_args->gpa, vcpu_args->gpa + + (vcpu_args->pages * args->guest_page_size)); + } +} + +struct kvm_vm *memstress_create_vm(enum vm_guest_mode mode, int nr_vcpus, + uint64_t vcpu_memory_bytes, int slots, + enum vm_mem_backing_src_type backing_src, + bool partition_vcpu_memory_access) +{ + struct memstress_args *args = &memstress_args; + struct kvm_vm *vm; + uint64_t guest_num_pages, slot0_pages = 0; + uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src); + uint64_t region_end_gfn; + int i; + + pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode)); + + /* By default vCPUs will write to memory. */ + args->write_percent = 100; + + /* + * Snapshot the non-huge page size. This is used by the guest code to + * access/dirty pages at the logging granularity. + */ + args->guest_page_size = vm_guest_mode_params[mode].page_size; + + guest_num_pages = vm_adjust_num_guest_pages(mode, + (nr_vcpus * vcpu_memory_bytes) / args->guest_page_size); + + TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0, + "Guest memory size is not host page size aligned."); + TEST_ASSERT(vcpu_memory_bytes % args->guest_page_size == 0, + "Guest memory size is not guest page size aligned."); + TEST_ASSERT(guest_num_pages % slots == 0, + "Guest memory cannot be evenly divided into %d slots.", + slots); + + /* + * If using nested, allocate extra pages for the nested page tables and + * in-memory data structures. + */ + if (args->nested) + slot0_pages += memstress_nested_pages(nr_vcpus); + + /* + * Pass guest_num_pages to populate the page tables for test memory. + * The memory is also added to memslot 0, but that's a benign side + * effect as KVM allows aliasing HVAs in meslots. + */ + vm = __vm_create_with_vcpus(mode, nr_vcpus, slot0_pages + guest_num_pages, + memstress_guest_code, vcpus); + + args->vm = vm; + + /* Put the test region at the top guest physical memory. */ + region_end_gfn = vm->max_gfn + 1; + +#ifdef __x86_64__ + /* + * When running vCPUs in L2, restrict the test region to 48 bits to + * avoid needing 5-level page tables to identity map L2. 
+ */ + if (args->nested) + region_end_gfn = min(region_end_gfn, (1UL << 48) / args->guest_page_size); +#endif + /* + * If there should be more memory in the guest test region than there + * can be pages in the guest, it will definitely cause problems. + */ + TEST_ASSERT(guest_num_pages < region_end_gfn, + "Requested more guest memory than address space allows.\n" + " guest pages: %" PRIx64 " max gfn: %" PRIx64 + " nr_vcpus: %d wss: %" PRIx64 "]\n", + guest_num_pages, region_end_gfn - 1, nr_vcpus, vcpu_memory_bytes); + + args->gpa = (region_end_gfn - guest_num_pages - 1) * args->guest_page_size; + args->gpa = align_down(args->gpa, backing_src_pagesz); +#ifdef __s390x__ + /* Align to 1M (segment size) */ + args->gpa = align_down(args->gpa, 1 << 20); +#endif + args->size = guest_num_pages * args->guest_page_size; + pr_info("guest physical test memory: [0x%lx, 0x%lx)\n", + args->gpa, args->gpa + args->size); + + /* Add extra memory slots for testing */ + for (i = 0; i < slots; i++) { + uint64_t region_pages = guest_num_pages / slots; + vm_paddr_t region_start = args->gpa + region_pages * args->guest_page_size * i; + + vm_userspace_mem_region_add(vm, backing_src, region_start, + MEMSTRESS_MEM_SLOT_INDEX + i, + region_pages, 0); + } + + /* Do mapping for the demand paging memory slot */ + virt_map(vm, guest_test_virt_mem, args->gpa, guest_num_pages); + + memstress_setup_vcpus(vm, nr_vcpus, vcpus, vcpu_memory_bytes, + partition_vcpu_memory_access); + + if (args->nested) { + pr_info("Configuring vCPUs to run in L2 (nested).\n"); + memstress_setup_nested(vm, nr_vcpus, vcpus); + } + + /* Export the shared variables to the guest. */ + sync_global_to_guest(vm, memstress_args); + + return vm; +} + +void memstress_destroy_vm(struct kvm_vm *vm) +{ + kvm_vm_free(vm); +} + +void memstress_set_write_percent(struct kvm_vm *vm, uint32_t write_percent) +{ + memstress_args.write_percent = write_percent; + sync_global_to_guest(vm, memstress_args.write_percent); +} + +void memstress_set_random_seed(struct kvm_vm *vm, uint32_t random_seed) +{ + memstress_args.random_seed = random_seed; + sync_global_to_guest(vm, memstress_args.random_seed); +} + +void memstress_set_random_access(struct kvm_vm *vm, bool random_access) +{ + memstress_args.random_access = random_access; + sync_global_to_guest(vm, memstress_args.random_access); +} + +uint64_t __weak memstress_nested_pages(int nr_vcpus) +{ + return 0; +} + +void __weak memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu **vcpus) +{ + pr_info("%s() not support on this architecture, skipping.\n", __func__); + exit(KSFT_SKIP); +} + +static void *vcpu_thread_main(void *data) +{ + struct vcpu_thread *vcpu = data; + int vcpu_idx = vcpu->vcpu_idx; + + if (memstress_args.pin_vcpus) + kvm_pin_this_task_to_pcpu(memstress_args.vcpu_to_pcpu[vcpu_idx]); + + WRITE_ONCE(vcpu->running, true); + + /* + * Wait for all vCPU threads to be up and running before calling the test- + * provided vCPU thread function. This prevents thread creation (which + * requires taking the mmap_sem in write mode) from interfering with the + * guest faulting in its memory. 
+ */ + while (!READ_ONCE(all_vcpu_threads_running)) + ; + + vcpu_thread_fn(&memstress_args.vcpu_args[vcpu_idx]); + + return NULL; +} + +void memstress_start_vcpu_threads(int nr_vcpus, + void (*vcpu_fn)(struct memstress_vcpu_args *)) +{ + int i; + + vcpu_thread_fn = vcpu_fn; + WRITE_ONCE(all_vcpu_threads_running, false); + WRITE_ONCE(memstress_args.stop_vcpus, false); + + for (i = 0; i < nr_vcpus; i++) { + struct vcpu_thread *vcpu = &vcpu_threads[i]; + + vcpu->vcpu_idx = i; + WRITE_ONCE(vcpu->running, false); + + pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu); + } + + for (i = 0; i < nr_vcpus; i++) { + while (!READ_ONCE(vcpu_threads[i].running)) + ; + } + + WRITE_ONCE(all_vcpu_threads_running, true); +} + +void memstress_join_vcpu_threads(int nr_vcpus) +{ + int i; + + WRITE_ONCE(memstress_args.stop_vcpus, true); + + for (i = 0; i < nr_vcpus; i++) + pthread_join(vcpu_threads[i].thread, NULL); +} + +static void toggle_dirty_logging(struct kvm_vm *vm, int slots, bool enable) +{ + int i; + + for (i = 0; i < slots; i++) { + int slot = MEMSTRESS_MEM_SLOT_INDEX + i; + int flags = enable ? KVM_MEM_LOG_DIRTY_PAGES : 0; + + vm_mem_region_set_flags(vm, slot, flags); + } +} + +void memstress_enable_dirty_logging(struct kvm_vm *vm, int slots) +{ + toggle_dirty_logging(vm, slots, true); +} + +void memstress_disable_dirty_logging(struct kvm_vm *vm, int slots) +{ + toggle_dirty_logging(vm, slots, false); +} + +void memstress_get_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], int slots) +{ + int i; + + for (i = 0; i < slots; i++) { + int slot = MEMSTRESS_MEM_SLOT_INDEX + i; + + kvm_vm_get_dirty_log(vm, slot, bitmaps[i]); + } +} + +void memstress_clear_dirty_log(struct kvm_vm *vm, unsigned long *bitmaps[], + int slots, uint64_t pages_per_slot) +{ + int i; + + for (i = 0; i < slots; i++) { + int slot = MEMSTRESS_MEM_SLOT_INDEX + i; + + kvm_vm_clear_dirty_log(vm, slot, bitmaps[i], 0, pages_per_slot); + } +} + +unsigned long **memstress_alloc_bitmaps(int slots, uint64_t pages_per_slot) +{ + unsigned long **bitmaps; + int i; + + bitmaps = malloc(slots * sizeof(bitmaps[0])); + TEST_ASSERT(bitmaps, "Failed to allocate bitmaps array."); + + for (i = 0; i < slots; i++) { + bitmaps[i] = bitmap_zalloc(pages_per_slot); + TEST_ASSERT(bitmaps[i], "Failed to allocate slot bitmap."); + } + + return bitmaps; +} + +void memstress_free_bitmaps(unsigned long *bitmaps[], int slots) +{ + int i; + + for (i = 0; i < slots; i++) + free(bitmaps[i]); + + free(bitmaps); +} diff --git a/tools/testing/selftests/kvm/lib/rbtree.c b/tools/testing/selftests/kvm/lib/rbtree.c new file mode 100644 index 0000000000..a703f0194e --- /dev/null +++ b/tools/testing/selftests/kvm/lib/rbtree.c @@ -0,0 +1 @@ +#include "../../../../lib/rbtree.c" diff --git a/tools/testing/selftests/kvm/lib/riscv/processor.c b/tools/testing/selftests/kvm/lib/riscv/processor.c new file mode 100644 index 0000000000..d146ca71e0 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/riscv/processor.c @@ -0,0 +1,369 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * RISC-V code + * + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
+ */ + +#include <linux/compiler.h> +#include <assert.h> + +#include "kvm_util.h" +#include "processor.h" + +#define DEFAULT_RISCV_GUEST_STACK_VADDR_MIN 0xac0000 + +static uint64_t page_align(struct kvm_vm *vm, uint64_t v) +{ + return (v + vm->page_size) & ~(vm->page_size - 1); +} + +static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry) +{ + return ((entry & PGTBL_PTE_ADDR_MASK) >> PGTBL_PTE_ADDR_SHIFT) << + PGTBL_PAGE_SIZE_SHIFT; +} + +static uint64_t ptrs_per_pte(struct kvm_vm *vm) +{ + return PGTBL_PAGE_SIZE / sizeof(uint64_t); +} + +static uint64_t pte_index_mask[] = { + PGTBL_L0_INDEX_MASK, + PGTBL_L1_INDEX_MASK, + PGTBL_L2_INDEX_MASK, + PGTBL_L3_INDEX_MASK, +}; + +static uint32_t pte_index_shift[] = { + PGTBL_L0_INDEX_SHIFT, + PGTBL_L1_INDEX_SHIFT, + PGTBL_L2_INDEX_SHIFT, + PGTBL_L3_INDEX_SHIFT, +}; + +static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva, int level) +{ + TEST_ASSERT(level > -1, + "Negative page table level (%d) not possible", level); + TEST_ASSERT(level < vm->pgtable_levels, + "Invalid page table level (%d)", level); + + return (gva & pte_index_mask[level]) >> pte_index_shift[level]; +} + +void virt_arch_pgd_alloc(struct kvm_vm *vm) +{ + size_t nr_pages = page_align(vm, ptrs_per_pte(vm) * 8) / vm->page_size; + + if (vm->pgd_created) + return; + + vm->pgd = vm_phy_pages_alloc(vm, nr_pages, + KVM_GUEST_PAGE_TABLE_MIN_PADDR, + vm->memslots[MEM_REGION_PT]); + vm->pgd_created = true; +} + +void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +{ + uint64_t *ptep, next_ppn; + int level = vm->pgtable_levels - 1; + + TEST_ASSERT((vaddr % vm->page_size) == 0, + "Virtual address not on page boundary,\n" + " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, + (vaddr >> vm->page_shift)), + "Invalid virtual address, vaddr: 0x%lx", vaddr); + TEST_ASSERT((paddr % vm->page_size) == 0, + "Physical address not on page boundary,\n" + " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); + TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond maximum supported,\n" + " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + paddr, vm->max_gfn, vm->page_size); + + ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, vaddr, level) * 8; + if (!*ptep) { + next_ppn = vm_alloc_page_table(vm) >> PGTBL_PAGE_SIZE_SHIFT; + *ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) | + PGTBL_PTE_VALID_MASK; + } + level--; + + while (level > -1) { + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + + pte_index(vm, vaddr, level) * 8; + if (!*ptep && level > 0) { + next_ppn = vm_alloc_page_table(vm) >> + PGTBL_PAGE_SIZE_SHIFT; + *ptep = (next_ppn << PGTBL_PTE_ADDR_SHIFT) | + PGTBL_PTE_VALID_MASK; + } + level--; + } + + paddr = paddr >> PGTBL_PAGE_SIZE_SHIFT; + *ptep = (paddr << PGTBL_PTE_ADDR_SHIFT) | + PGTBL_PTE_PERM_MASK | PGTBL_PTE_VALID_MASK; +} + +vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +{ + uint64_t *ptep; + int level = vm->pgtable_levels - 1; + + if (!vm->pgd_created) + goto unmapped_gva; + + ptep = addr_gpa2hva(vm, vm->pgd) + pte_index(vm, gva, level) * 8; + if (!ptep) + goto unmapped_gva; + level--; + + while (level > -1) { + ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + + pte_index(vm, gva, level) * 8; + if (!ptep) + goto unmapped_gva; + level--; + } + + return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); + +unmapped_gva: + TEST_FAIL("No mapping for vm virtual address gva: 0x%lx level: %d", + gva, level); + exit(1); +} + +static void pte_dump(FILE *stream, struct 
kvm_vm *vm, uint8_t indent, + uint64_t page, int level) +{ +#ifdef DEBUG + static const char *const type[] = { "pte", "pmd", "pud", "p4d"}; + uint64_t pte, *ptep; + + if (level < 0) + return; + + for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) { + ptep = addr_gpa2hva(vm, pte); + if (!*ptep) + continue; + fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", + type[level], pte, *ptep, ptep); + pte_dump(stream, vm, indent + 1, + pte_addr(vm, *ptep), level - 1); + } +#endif +} + +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +{ + int level = vm->pgtable_levels - 1; + uint64_t pgd, *ptep; + + if (!vm->pgd_created) + return; + + for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pte(vm) * 8; pgd += 8) { + ptep = addr_gpa2hva(vm, pgd); + if (!*ptep) + continue; + fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", + pgd, *ptep, ptep); + pte_dump(stream, vm, indent + 1, + pte_addr(vm, *ptep), level - 1); + } +} + +void riscv_vcpu_mmu_setup(struct kvm_vcpu *vcpu) +{ + struct kvm_vm *vm = vcpu->vm; + unsigned long satp; + + /* + * The RISC-V Sv48 MMU mode supports 56-bit physical address + * for 48-bit virtual address with 4KB last level page size. + */ + switch (vm->mode) { + case VM_MODE_P52V48_4K: + case VM_MODE_P48V48_4K: + case VM_MODE_P40V48_4K: + break; + default: + TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); + } + + satp = (vm->pgd >> PGTBL_PAGE_SIZE_SHIFT) & SATP_PPN; + satp |= SATP_MODE_48; + + vcpu_set_reg(vcpu, RISCV_CSR_REG(satp), satp); +} + +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +{ + struct kvm_riscv_core core; + + vcpu_get_reg(vcpu, RISCV_CORE_REG(mode), &core.mode); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.pc), &core.regs.pc); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.ra), &core.regs.ra); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.sp), &core.regs.sp); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.gp), &core.regs.gp); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.tp), &core.regs.tp); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t0), &core.regs.t0); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t1), &core.regs.t1); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t2), &core.regs.t2); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s0), &core.regs.s0); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s1), &core.regs.s1); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a0), &core.regs.a0); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a1), &core.regs.a1); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a2), &core.regs.a2); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a3), &core.regs.a3); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a4), &core.regs.a4); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a5), &core.regs.a5); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a6), &core.regs.a6); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.a7), &core.regs.a7); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s2), &core.regs.s2); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s3), &core.regs.s3); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s4), &core.regs.s4); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s5), &core.regs.s5); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s6), &core.regs.s6); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s7), &core.regs.s7); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s8), &core.regs.s8); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s9), &core.regs.s9); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s10), &core.regs.s10); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.s11), &core.regs.s11); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t3), &core.regs.t3); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t4), &core.regs.t4); + 
vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t5), &core.regs.t5); + vcpu_get_reg(vcpu, RISCV_CORE_REG(regs.t6), &core.regs.t6); + + fprintf(stream, + " MODE: 0x%lx\n", core.mode); + fprintf(stream, + " PC: 0x%016lx RA: 0x%016lx SP: 0x%016lx GP: 0x%016lx\n", + core.regs.pc, core.regs.ra, core.regs.sp, core.regs.gp); + fprintf(stream, + " TP: 0x%016lx T0: 0x%016lx T1: 0x%016lx T2: 0x%016lx\n", + core.regs.tp, core.regs.t0, core.regs.t1, core.regs.t2); + fprintf(stream, + " S0: 0x%016lx S1: 0x%016lx A0: 0x%016lx A1: 0x%016lx\n", + core.regs.s0, core.regs.s1, core.regs.a0, core.regs.a1); + fprintf(stream, + " A2: 0x%016lx A3: 0x%016lx A4: 0x%016lx A5: 0x%016lx\n", + core.regs.a2, core.regs.a3, core.regs.a4, core.regs.a5); + fprintf(stream, + " A6: 0x%016lx A7: 0x%016lx S2: 0x%016lx S3: 0x%016lx\n", + core.regs.a6, core.regs.a7, core.regs.s2, core.regs.s3); + fprintf(stream, + " S4: 0x%016lx S5: 0x%016lx S6: 0x%016lx S7: 0x%016lx\n", + core.regs.s4, core.regs.s5, core.regs.s6, core.regs.s7); + fprintf(stream, + " S8: 0x%016lx S9: 0x%016lx S10: 0x%016lx S11: 0x%016lx\n", + core.regs.s8, core.regs.s9, core.regs.s10, core.regs.s11); + fprintf(stream, + " T3: 0x%016lx T4: 0x%016lx T5: 0x%016lx T6: 0x%016lx\n", + core.regs.t3, core.regs.t4, core.regs.t5, core.regs.t6); +} + +static void __aligned(16) guest_unexp_trap(void) +{ + sbi_ecall(KVM_RISCV_SELFTESTS_SBI_EXT, + KVM_RISCV_SELFTESTS_SBI_UNEXP, + 0, 0, 0, 0, 0, 0); +} + +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, + void *guest_code) +{ + int r; + size_t stack_size; + unsigned long stack_vaddr; + unsigned long current_gp = 0; + struct kvm_mp_state mps; + struct kvm_vcpu *vcpu; + + stack_size = vm->page_size == 4096 ? DEFAULT_STACK_PGS * vm->page_size : + vm->page_size; + stack_vaddr = __vm_vaddr_alloc(vm, stack_size, + DEFAULT_RISCV_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); + + vcpu = __vm_vcpu_add(vm, vcpu_id); + riscv_vcpu_mmu_setup(vcpu); + + /* + * With SBI HSM support in KVM RISC-V, all secondary VCPUs are + * powered-off by default so we ensure that all secondary VCPUs + * are powered-on using KVM_SET_MP_STATE ioctl(). + */ + mps.mp_state = KVM_MP_STATE_RUNNABLE; + r = __vcpu_ioctl(vcpu, KVM_SET_MP_STATE, &mps); + TEST_ASSERT(!r, "IOCTL KVM_SET_MP_STATE failed (error %d)", r); + + /* Setup global pointer of guest to be same as the host */ + asm volatile ( + "add %0, gp, zero" : "=r" (current_gp) : : "memory"); + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.gp), current_gp); + + /* Setup stack pointer and program counter of guest */ + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.sp), stack_vaddr + stack_size); + vcpu_set_reg(vcpu, RISCV_CORE_REG(regs.pc), (unsigned long)guest_code); + + /* Setup default exception vector of guest */ + vcpu_set_reg(vcpu, RISCV_CSR_REG(stvec), (unsigned long)guest_unexp_trap); + + return vcpu; +} + +void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
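/* Seeds up to eight integer arguments into the guest's a0-a7 registers. */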
+{ + va_list ap; + uint64_t id = RISCV_CORE_REG(regs.a0); + int i; + + TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n" + " num: %u\n", num); + + va_start(ap, num); + + for (i = 0; i < num; i++) { + switch (i) { + case 0: + id = RISCV_CORE_REG(regs.a0); + break; + case 1: + id = RISCV_CORE_REG(regs.a1); + break; + case 2: + id = RISCV_CORE_REG(regs.a2); + break; + case 3: + id = RISCV_CORE_REG(regs.a3); + break; + case 4: + id = RISCV_CORE_REG(regs.a4); + break; + case 5: + id = RISCV_CORE_REG(regs.a5); + break; + case 6: + id = RISCV_CORE_REG(regs.a6); + break; + case 7: + id = RISCV_CORE_REG(regs.a7); + break; + } + vcpu_set_reg(vcpu, id, va_arg(ap, uint64_t)); + } + + va_end(ap); +} + +void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) +{ +} diff --git a/tools/testing/selftests/kvm/lib/riscv/ucall.c b/tools/testing/selftests/kvm/lib/riscv/ucall.c new file mode 100644 index 0000000000..fe6d1004f0 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/riscv/ucall.c @@ -0,0 +1,57 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ucall support. A ucall is a "hypercall to userspace". + * + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + */ + +#include <linux/kvm.h> + +#include "kvm_util.h" +#include "processor.h" + +struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, + unsigned long arg1, unsigned long arg2, + unsigned long arg3, unsigned long arg4, + unsigned long arg5) +{ + register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0); + register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1); + register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2); + register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3); + register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4); + register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5); + register uintptr_t a6 asm ("a6") = (uintptr_t)(fid); + register uintptr_t a7 asm ("a7") = (uintptr_t)(ext); + struct sbiret ret; + + asm volatile ( + "ecall" + : "+r" (a0), "+r" (a1) + : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7) + : "memory"); + ret.error = a0; + ret.value = a1; + + return ret; +} + +void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + if (run->exit_reason == KVM_EXIT_RISCV_SBI && + run->riscv_sbi.extension_id == KVM_RISCV_SELFTESTS_SBI_EXT) { + switch (run->riscv_sbi.function_id) { + case KVM_RISCV_SELFTESTS_SBI_UCALL: + return (void *)run->riscv_sbi.args[0]; + case KVM_RISCV_SELFTESTS_SBI_UNEXP: + vcpu_dump(stderr, vcpu, 2); + TEST_ASSERT(0, "Unexpected trap taken by guest"); + break; + default: + break; + } + } + return NULL; +} diff --git a/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c new file mode 100644 index 0000000000..2c432fa164 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c @@ -0,0 +1,80 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Test handler for the s390x DIAGNOSE 0x0318 instruction. + * + * Copyright (C) 2020, IBM + */ + +#include "test_util.h" +#include "kvm_util.h" + +#define ICPT_INSTRUCTION 0x04 +#define IPA0_DIAG 0x8300 + +static void guest_code(void) +{ + uint64_t diag318_info = 0x12345678; + + asm volatile ("diag %0,0,0x318\n" : : "d" (diag318_info)); +} + +/* + * The DIAGNOSE 0x0318 instruction call must be handled via userspace. As such, + * we create an ad-hoc VM here to handle the instruction then extract the + * necessary data. It is up to the caller to decide what to do with that data. 
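 *
 * get_diag318_info() below wraps this handler: it returns 0 (so callers can
 * skip) when KVM_CAP_S390_DIAG318 is unsupported, and otherwise caches the
 * value so the temporary VM is only spun up once.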
+ */ +static uint64_t diag318_handler(void) +{ + struct kvm_vcpu *vcpu; + struct kvm_vm *vm; + struct kvm_run *run; + uint64_t reg; + uint64_t diag318_info; + + vm = vm_create_with_one_vcpu(&vcpu, guest_code); + vcpu_run(vcpu); + run = vcpu->run; + + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC); + TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION, + "Unexpected intercept code: 0x%x", run->s390_sieic.icptcode); + TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG, + "Unexpected IPA0 code: 0x%x", (run->s390_sieic.ipa & 0xff00)); + + reg = (run->s390_sieic.ipa & 0x00f0) >> 4; + diag318_info = run->s.regs.gprs[reg]; + + TEST_ASSERT(diag318_info != 0, "DIAGNOSE 0x0318 info not set"); + + kvm_vm_free(vm); + + return diag318_info; +} + +uint64_t get_diag318_info(void) +{ + static uint64_t diag318_info; + static bool printed_skip; + + /* + * If KVM does not support diag318, then return 0 to + * ensure tests do not break. + */ + if (!kvm_has_cap(KVM_CAP_S390_DIAG318)) { + if (!printed_skip) { + fprintf(stdout, "KVM_CAP_S390_DIAG318 not supported. " + "Skipping diag318 test.\n"); + printed_skip = true; + } + return 0; + } + + /* + * If a test has previously requested the diag318 info, + * then don't bother spinning up a temporary VM again. + */ + if (!diag318_info) + diag318_info = diag318_handler(); + + return diag318_info; +} diff --git a/tools/testing/selftests/kvm/lib/s390x/processor.c b/tools/testing/selftests/kvm/lib/s390x/processor.c new file mode 100644 index 0000000000..15945121da --- /dev/null +++ b/tools/testing/selftests/kvm/lib/s390x/processor.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * KVM selftest s390x library code - CPU-related functions (page tables...) + * + * Copyright (C) 2019, Red Hat, Inc. + */ + +#include "processor.h" +#include "kvm_util.h" + +#define PAGES_PER_REGION 4 + +void virt_arch_pgd_alloc(struct kvm_vm *vm) +{ + vm_paddr_t paddr; + + TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", + vm->page_size); + + if (vm->pgd_created) + return; + + paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION, + KVM_GUEST_PAGE_TABLE_MIN_PADDR, + vm->memslots[MEM_REGION_PT]); + memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); + + vm->pgd = paddr; + vm->pgd_created = true; +} + +/* + * Allocate 4 pages for a region/segment table (ri < 4), or one page for + * a page table (ri == 4). Returns a suitable region/segment table entry + * which points to the freshly allocated pages. + */ +static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri) +{ + uint64_t taddr; + + taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1, + KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0); + memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size); + + return (taddr & REGION_ENTRY_ORIGIN) + | (((4 - ri) << 2) & REGION_ENTRY_TYPE) + | ((ri < 4 ? 
(PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH); +} + +void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa) +{ + int ri, idx; + uint64_t *entry; + + TEST_ASSERT((gva % vm->page_size) == 0, + "Virtual address not on page boundary,\n" + " vaddr: 0x%lx vm->page_size: 0x%x", + gva, vm->page_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, + (gva >> vm->page_shift)), + "Invalid virtual address, vaddr: 0x%lx", + gva); + TEST_ASSERT((gpa % vm->page_size) == 0, + "Physical address not on page boundary,\n" + " paddr: 0x%lx vm->page_size: 0x%x", + gva, vm->page_size); + TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond beyond maximum supported,\n" + " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + gva, vm->max_gfn, vm->page_size); + + /* Walk through region and segment tables */ + entry = addr_gpa2hva(vm, vm->pgd); + for (ri = 1; ri <= 4; ri++) { + idx = (gva >> (64 - 11 * ri)) & 0x7ffu; + if (entry[idx] & REGION_ENTRY_INVALID) + entry[idx] = virt_alloc_region(vm, ri); + entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN); + } + + /* Fill in page table entry */ + idx = (gva >> 12) & 0x0ffu; /* page index */ + if (!(entry[idx] & PAGE_INVALID)) + fprintf(stderr, + "WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa); + entry[idx] = gpa; +} + +vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +{ + int ri, idx; + uint64_t *entry; + + TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", + vm->page_size); + + entry = addr_gpa2hva(vm, vm->pgd); + for (ri = 1; ri <= 4; ri++) { + idx = (gva >> (64 - 11 * ri)) & 0x7ffu; + TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID), + "No region mapping for vm virtual address 0x%lx", + gva); + entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN); + } + + idx = (gva >> 12) & 0x0ffu; /* page index */ + + TEST_ASSERT(!(entry[idx] & PAGE_INVALID), + "No page mapping for vm virtual address 0x%lx", gva); + + return (entry[idx] & ~0xffful) + (gva & 0xffful); +} + +static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent, + uint64_t ptea_start) +{ + uint64_t *pte, ptea; + + for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) { + pte = addr_gpa2hva(vm, ptea); + if (*pte & PAGE_INVALID) + continue; + fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n", + indent, "", ptea, *pte); + } +} + +static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent, + uint64_t reg_tab_addr) +{ + uint64_t addr, *entry; + + for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) { + entry = addr_gpa2hva(vm, addr); + if (*entry & REGION_ENTRY_INVALID) + continue; + fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n", + indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2), + addr, *entry); + if (*entry & REGION_ENTRY_TYPE) { + virt_dump_region(stream, vm, indent + 2, + *entry & REGION_ENTRY_ORIGIN); + } else { + virt_dump_ptes(stream, vm, indent + 2, + *entry & REGION_ENTRY_ORIGIN); + } + } +} + +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +{ + if (!vm->pgd_created) + return; + + virt_dump_region(stream, vm, indent, vm->pgd); +} + +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, + void *guest_code) +{ + size_t stack_size = DEFAULT_STACK_PGS * getpagesize(); + uint64_t stack_vaddr; + struct kvm_regs regs; + struct kvm_sregs sregs; + struct kvm_vcpu *vcpu; + struct kvm_run *run; + + TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", + vm->page_size); + + stack_vaddr = 
__vm_vaddr_alloc(vm, stack_size, + DEFAULT_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); + + vcpu = __vm_vcpu_add(vm, vcpu_id); + + /* Setup guest registers */ + vcpu_regs_get(vcpu, ®s); + regs.gprs[15] = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize()) - 160; + vcpu_regs_set(vcpu, ®s); + + vcpu_sregs_get(vcpu, &sregs); + sregs.crs[0] |= 0x00040000; /* Enable floating point regs */ + sregs.crs[1] = vm->pgd | 0xf; /* Primary region table */ + vcpu_sregs_set(vcpu, &sregs); + + run = vcpu->run; + run->psw_mask = 0x0400000180000000ULL; /* DAT enabled + 64 bit mode */ + run->psw_addr = (uintptr_t)guest_code; + + return vcpu; +} + +void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) +{ + va_list ap; + struct kvm_regs regs; + int i; + + TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n" + " num: %u\n", + num); + + va_start(ap, num); + vcpu_regs_get(vcpu, ®s); + + for (i = 0; i < num; i++) + regs.gprs[i + 2] = va_arg(ap, uint64_t); + + vcpu_regs_set(vcpu, ®s); + va_end(ap); +} + +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +{ + fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n", + indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr); +} + +void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) +{ +} diff --git a/tools/testing/selftests/kvm/lib/s390x/ucall.c b/tools/testing/selftests/kvm/lib/s390x/ucall.c new file mode 100644 index 0000000000..cca9873465 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/s390x/ucall.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ucall support. A ucall is a "hypercall to userspace". + * + * Copyright (C) 2019 Red Hat, Inc. + */ +#include "kvm_util.h" + +void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + if (run->exit_reason == KVM_EXIT_S390_SIEIC && + run->s390_sieic.icptcode == 4 && + (run->s390_sieic.ipa >> 8) == 0x83 && /* 0x83 means DIAGNOSE */ + (run->s390_sieic.ipb >> 16) == 0x501) { + int reg = run->s390_sieic.ipa & 0xf; + + return (void *)run->s.regs.gprs[reg]; + } + return NULL; +} diff --git a/tools/testing/selftests/kvm/lib/sparsebit.c b/tools/testing/selftests/kvm/lib/sparsebit.c new file mode 100644 index 0000000000..88cb6b84e6 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/sparsebit.c @@ -0,0 +1,2084 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Sparse bit array + * + * Copyright (C) 2018, Google LLC. + * Copyright (C) 2018, Red Hat, Inc. (code style cleanup and fuzzing driver) + * + * This library provides functions to support a memory efficient bit array, + * with an index size of 2^64. A sparsebit array is allocated through + * the use sparsebit_alloc() and free'd via sparsebit_free(), + * such as in the following: + * + * struct sparsebit *s; + * s = sparsebit_alloc(); + * sparsebit_free(&s); + * + * The struct sparsebit type resolves down to a struct sparsebit. + * Note that, sparsebit_free() takes a pointer to the sparsebit + * structure. This is so that sparsebit_free() is able to poison + * the pointer (e.g. set it to NULL) to the struct sparsebit before + * returning to the caller. + * + * Between the return of sparsebit_alloc() and the call of + * sparsebit_free(), there are multiple query and modifying operations + * that can be performed on the allocated sparsebit array. All of + * these operations take as a parameter the value returned from + * sparsebit_alloc() and most also take a bit index. 
Frequently + * used routines include: + * + * ---- Query Operations + * sparsebit_is_set(s, idx) + * sparsebit_is_clear(s, idx) + * sparsebit_any_set(s) + * sparsebit_first_set(s) + * sparsebit_next_set(s, prev_idx) + * + * ---- Modifying Operations + * sparsebit_set(s, idx) + * sparsebit_clear(s, idx) + * sparsebit_set_num(s, idx, num); + * sparsebit_clear_num(s, idx, num); + * + * A common operation, is to itterate over all the bits set in a test + * sparsebit array. This can be done via code with the following structure: + * + * sparsebit_idx_t idx; + * if (sparsebit_any_set(s)) { + * idx = sparsebit_first_set(s); + * do { + * ... + * idx = sparsebit_next_set(s, idx); + * } while (idx != 0); + * } + * + * The index of the first bit set needs to be obtained via + * sparsebit_first_set(), because sparsebit_next_set(), needs + * the index of the previously set. The sparsebit_idx_t type is + * unsigned, so there is no previous index before 0 that is available. + * Also, the call to sparsebit_first_set() is not made unless there + * is at least 1 bit in the array set. This is because sparsebit_first_set() + * aborts if sparsebit_first_set() is called with no bits set. + * It is the callers responsibility to assure that the + * sparsebit array has at least a single bit set before calling + * sparsebit_first_set(). + * + * ==== Implementation Overview ==== + * For the most part the internal implementation of sparsebit is + * opaque to the caller. One important implementation detail that the + * caller may need to be aware of is the spatial complexity of the + * implementation. This implementation of a sparsebit array is not + * only sparse, in that it uses memory proportional to the number of bits + * set. It is also efficient in memory usage when most of the bits are + * set. + * + * At a high-level the state of the bit settings are maintained through + * the use of a binary-search tree, where each node contains at least + * the following members: + * + * typedef uint64_t sparsebit_idx_t; + * typedef uint64_t sparsebit_num_t; + * + * sparsebit_idx_t idx; + * uint32_t mask; + * sparsebit_num_t num_after; + * + * The idx member contains the bit index of the first bit described by this + * node, while the mask member stores the setting of the first 32-bits. + * The setting of the bit at idx + n, where 0 <= n < 32, is located in the + * mask member at 1 << n. + * + * Nodes are sorted by idx and the bits described by two nodes will never + * overlap. The idx member is always aligned to the mask size, i.e. a + * multiple of 32. + * + * Beyond a typical implementation, the nodes in this implementation also + * contains a member named num_after. The num_after member holds the + * number of bits immediately after the mask bits that are contiguously set. + * The use of the num_after member allows this implementation to efficiently + * represent cases where most bits are set. For example, the case of all + * but the last two bits set, is represented by the following two nodes: + * + * node 0 - idx: 0x0 mask: 0xffffffff num_after: 0xffffffffffffffc0 + * node 1 - idx: 0xffffffffffffffe0 mask: 0x3fffffff num_after: 0 + * + * ==== Invariants ==== + * This implementation usses the following invariants: + * + * + Node are only used to represent bits that are set. + * Nodes with a mask of 0 and num_after of 0 are not allowed. + * + * + Sum of bits set in all the nodes is equal to the value of + * the struct sparsebit_pvt num_set member. 
+ * + * + The setting of at least one bit is always described in a nodes + * mask (mask >= 1). + * + * + A node with all mask bits set only occurs when the last bit + * described by the previous node is not equal to this nodes + * starting index - 1. All such occurences of this condition are + * avoided by moving the setting of the nodes mask bits into + * the previous nodes num_after setting. + * + * + Node starting index is evenly divisible by the number of bits + * within a nodes mask member. + * + * + Nodes never represent a range of bits that wrap around the + * highest supported index. + * + * (idx + MASK_BITS + num_after - 1) <= ((sparsebit_idx_t) 0) - 1) + * + * As a consequence of the above, the num_after member of a node + * will always be <=: + * + * maximum_index - nodes_starting_index - number_of_mask_bits + * + * + Nodes within the binary search tree are sorted based on each + * nodes starting index. + * + * + The range of bits described by any two nodes do not overlap. The + * range of bits described by a single node is: + * + * start: node->idx + * end (inclusive): node->idx + MASK_BITS + node->num_after - 1; + * + * Note, at times these invariants are temporarily violated for a + * specific portion of the code. For example, when setting a mask + * bit, there is a small delay between when the mask bit is set and the + * value in the struct sparsebit_pvt num_set member is updated. Other + * temporary violations occur when node_split() is called with a specified + * index and assures that a node where its mask represents the bit + * at the specified index exists. At times to do this node_split() + * must split an existing node into two nodes or create a node that + * has no bits set. Such temporary violations must be corrected before + * returning to the caller. These corrections are typically performed + * by the local function node_reduce(). + */ + +#include "test_util.h" +#include "sparsebit.h" +#include <limits.h> +#include <assert.h> + +#define DUMP_LINE_MAX 100 /* Does not include indent amount */ + +typedef uint32_t mask_t; +#define MASK_BITS (sizeof(mask_t) * CHAR_BIT) + +struct node { + struct node *parent; + struct node *left; + struct node *right; + sparsebit_idx_t idx; /* index of least-significant bit in mask */ + sparsebit_num_t num_after; /* num contiguously set after mask */ + mask_t mask; +}; + +struct sparsebit { + /* + * Points to root node of the binary search + * tree. Equal to NULL when no bits are set in + * the entire sparsebit array. + */ + struct node *root; + + /* + * A redundant count of the total number of bits set. Used for + * diagnostic purposes and to change the time complexity of + * sparsebit_num_set() from O(n) to O(1). + * Note: Due to overflow, a value of 0 means none or all set. + */ + sparsebit_num_t num_set; +}; + +/* Returns the number of set bits described by the settings + * of the node pointed to by nodep. + */ +static sparsebit_num_t node_num_set(struct node *nodep) +{ + return nodep->num_after + __builtin_popcount(nodep->mask); +} + +/* Returns a pointer to the node that describes the + * lowest bit index. + */ +static struct node *node_first(struct sparsebit *s) +{ + struct node *nodep; + + for (nodep = s->root; nodep && nodep->left; nodep = nodep->left) + ; + + return nodep; +} + +/* Returns a pointer to the node that describes the + * lowest bit index > the index of the node pointed to by np. + * Returns NULL if no node with a higher index exists. 
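 *
 * This is the standard in-order successor walk: the left-most node of the
 * right subtree if one exists, otherwise the first ancestor reached from a
 * left child.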
+ */ +static struct node *node_next(struct sparsebit *s, struct node *np) +{ + struct node *nodep = np; + + /* + * If current node has a right child, next node is the left-most + * of the right child. + */ + if (nodep->right) { + for (nodep = nodep->right; nodep->left; nodep = nodep->left) + ; + return nodep; + } + + /* + * No right child. Go up until node is left child of a parent. + * That parent is then the next node. + */ + while (nodep->parent && nodep == nodep->parent->right) + nodep = nodep->parent; + + return nodep->parent; +} + +/* Searches for and returns a pointer to the node that describes the + * highest index < the index of the node pointed to by np. + * Returns NULL if no node with a lower index exists. + */ +static struct node *node_prev(struct sparsebit *s, struct node *np) +{ + struct node *nodep = np; + + /* + * If current node has a left child, next node is the right-most + * of the left child. + */ + if (nodep->left) { + for (nodep = nodep->left; nodep->right; nodep = nodep->right) + ; + return (struct node *) nodep; + } + + /* + * No left child. Go up until node is right child of a parent. + * That parent is then the next node. + */ + while (nodep->parent && nodep == nodep->parent->left) + nodep = nodep->parent; + + return (struct node *) nodep->parent; +} + + +/* Allocates space to hold a copy of the node sub-tree pointed to by + * subtree and duplicates the bit settings to the newly allocated nodes. + * Returns the newly allocated copy of subtree. + */ +static struct node *node_copy_subtree(struct node *subtree) +{ + struct node *root; + + /* Duplicate the node at the root of the subtree */ + root = calloc(1, sizeof(*root)); + if (!root) { + perror("calloc"); + abort(); + } + + root->idx = subtree->idx; + root->mask = subtree->mask; + root->num_after = subtree->num_after; + + /* As needed, recursively duplicate the left and right subtrees */ + if (subtree->left) { + root->left = node_copy_subtree(subtree->left); + root->left->parent = root; + } + + if (subtree->right) { + root->right = node_copy_subtree(subtree->right); + root->right->parent = root; + } + + return root; +} + +/* Searches for and returns a pointer to the node that describes the setting + * of the bit given by idx. A node describes the setting of a bit if its + * index is within the bits described by the mask bits or the number of + * contiguous bits set after the mask. Returns NULL if there is no such node. + */ +static struct node *node_find(struct sparsebit *s, sparsebit_idx_t idx) +{ + struct node *nodep; + + /* Find the node that describes the setting of the bit at idx */ + for (nodep = s->root; nodep; + nodep = nodep->idx > idx ? nodep->left : nodep->right) { + if (idx >= nodep->idx && + idx <= nodep->idx + MASK_BITS + nodep->num_after - 1) + break; + } + + return nodep; +} + +/* Entry Requirements: + * + A node that describes the setting of idx is not already present. + * + * Adds a new node to describe the setting of the bit at the index given + * by idx. Returns a pointer to the newly added node. + * + * TODO(lhuemill): Degenerate cases causes the tree to get unbalanced. + */ +static struct node *node_add(struct sparsebit *s, sparsebit_idx_t idx) +{ + struct node *nodep, *parentp, *prev; + + /* Allocate and initialize the new node. */ + nodep = calloc(1, sizeof(*nodep)); + if (!nodep) { + perror("calloc"); + abort(); + } + + nodep->idx = idx & -MASK_BITS; + + /* If no nodes, set it up as the root node. 
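An empty sparsebit array is represented by a NULL root, so the first node added simply becomes the root.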
*/ + if (!s->root) { + s->root = nodep; + return nodep; + } + + /* + * Find the parent where the new node should be attached + * and add the node there. + */ + parentp = s->root; + while (true) { + if (idx < parentp->idx) { + if (!parentp->left) { + parentp->left = nodep; + nodep->parent = parentp; + break; + } + parentp = parentp->left; + } else { + assert(idx > parentp->idx + MASK_BITS + parentp->num_after - 1); + if (!parentp->right) { + parentp->right = nodep; + nodep->parent = parentp; + break; + } + parentp = parentp->right; + } + } + + /* + * Does num_after bits of previous node overlap with the mask + * of the new node? If so set the bits in the new nodes mask + * and reduce the previous nodes num_after. + */ + prev = node_prev(s, nodep); + while (prev && prev->idx + MASK_BITS + prev->num_after - 1 >= nodep->idx) { + unsigned int n1 = (prev->idx + MASK_BITS + prev->num_after - 1) + - nodep->idx; + assert(prev->num_after > 0); + assert(n1 < MASK_BITS); + assert(!(nodep->mask & (1 << n1))); + nodep->mask |= (1 << n1); + prev->num_after--; + } + + return nodep; +} + +/* Returns whether all the bits in the sparsebit array are set. */ +bool sparsebit_all_set(struct sparsebit *s) +{ + /* + * If any nodes there must be at least one bit set. Only case + * where a bit is set and total num set is 0, is when all bits + * are set. + */ + return s->root && s->num_set == 0; +} + +/* Clears all bits described by the node pointed to by nodep, then + * removes the node. + */ +static void node_rm(struct sparsebit *s, struct node *nodep) +{ + struct node *tmp; + sparsebit_num_t num_set; + + num_set = node_num_set(nodep); + assert(s->num_set >= num_set || sparsebit_all_set(s)); + s->num_set -= node_num_set(nodep); + + /* Have both left and right child */ + if (nodep->left && nodep->right) { + /* + * Move left children to the leftmost leaf node + * of the right child. + */ + for (tmp = nodep->right; tmp->left; tmp = tmp->left) + ; + tmp->left = nodep->left; + nodep->left = NULL; + tmp->left->parent = tmp; + } + + /* Left only child */ + if (nodep->left) { + if (!nodep->parent) { + s->root = nodep->left; + nodep->left->parent = NULL; + } else { + nodep->left->parent = nodep->parent; + if (nodep == nodep->parent->left) + nodep->parent->left = nodep->left; + else { + assert(nodep == nodep->parent->right); + nodep->parent->right = nodep->left; + } + } + + nodep->parent = nodep->left = nodep->right = NULL; + free(nodep); + + return; + } + + + /* Right only child */ + if (nodep->right) { + if (!nodep->parent) { + s->root = nodep->right; + nodep->right->parent = NULL; + } else { + nodep->right->parent = nodep->parent; + if (nodep == nodep->parent->left) + nodep->parent->left = nodep->right; + else { + assert(nodep == nodep->parent->right); + nodep->parent->right = nodep->right; + } + } + + nodep->parent = nodep->left = nodep->right = NULL; + free(nodep); + + return; + } + + /* Leaf Node */ + if (!nodep->parent) { + s->root = NULL; + } else { + if (nodep->parent->left == nodep) + nodep->parent->left = NULL; + else { + assert(nodep == nodep->parent->right); + nodep->parent->right = NULL; + } + } + + nodep->parent = nodep->left = nodep->right = NULL; + free(nodep); + + return; +} + +/* Splits the node containing the bit at idx so that there is a node + * that starts at the specified index. If no such node exists, a new + * node at the specified index is created. Returns the new node. + * + * idx must start of a mask boundary. 
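 * That is, idx must be a multiple of MASK_BITS, which the assert at the top
 * of the function enforces.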
+ */ +static struct node *node_split(struct sparsebit *s, sparsebit_idx_t idx) +{ + struct node *nodep1, *nodep2; + sparsebit_idx_t offset; + sparsebit_num_t orig_num_after; + + assert(!(idx % MASK_BITS)); + + /* + * Is there a node that describes the setting of idx? + * If not, add it. + */ + nodep1 = node_find(s, idx); + if (!nodep1) + return node_add(s, idx); + + /* + * All done if the starting index of the node is where the + * split should occur. + */ + if (nodep1->idx == idx) + return nodep1; + + /* + * Split point not at start of mask, so it must be part of + * bits described by num_after. + */ + + /* + * Calculate offset within num_after for where the split is + * to occur. + */ + offset = idx - (nodep1->idx + MASK_BITS); + orig_num_after = nodep1->num_after; + + /* + * Add a new node to describe the bits starting at + * the split point. + */ + nodep1->num_after = offset; + nodep2 = node_add(s, idx); + + /* Move bits after the split point into the new node */ + nodep2->num_after = orig_num_after - offset; + if (nodep2->num_after >= MASK_BITS) { + nodep2->mask = ~(mask_t) 0; + nodep2->num_after -= MASK_BITS; + } else { + nodep2->mask = (1 << nodep2->num_after) - 1; + nodep2->num_after = 0; + } + + return nodep2; +} + +/* Iteratively reduces the node pointed to by nodep and its adjacent + * nodes into a more compact form. For example, a node with a mask with + * all bits set adjacent to a previous node, will get combined into a + * single node with an increased num_after setting. + * + * After each reduction, a further check is made to see if additional + * reductions are possible with the new previous and next nodes. Note, + * a search for a reduction is only done across the nodes nearest nodep + * and those that became part of a reduction. Reductions beyond nodep + * and the adjacent nodes that are reduced are not discovered. It is the + * responsibility of the caller to pass a nodep that is within one node + * of each possible reduction. + * + * This function does not fix the temporary violation of all invariants. + * For example it does not fix the case where the bit settings described + * by two or more nodes overlap. Such a violation introduces the potential + * complication of a bit setting for a specific index having different settings + * in different nodes. This would then introduce the further complication + * of which node has the correct setting of the bit and thus such conditions + * are not allowed. + * + * This function is designed to fix invariant violations that are introduced + * by node_split() and by changes to the nodes mask or num_after members. + * For example, when setting a bit within a nodes mask, the function that + * sets the bit doesn't have to worry about whether the setting of that + * bit caused the mask to have leading only or trailing only bits set. + * Instead, the function can call node_reduce(), with nodep equal to the + * node address that it set a mask bit in, and node_reduce() will notice + * the cases of leading or trailing only bits and that there is an + * adjacent node that the bit settings could be merged into. + * + * This implementation specifically detects and corrects violation of the + * following invariants: + * + * + Node are only used to represent bits that are set. + * Nodes with a mask of 0 and num_after of 0 are not allowed. + * + * + The setting of at least one bit is always described in a nodes + * mask (mask >= 1). 
+ * + * + A node with all mask bits set only occurs when the last bit + * described by the previous node is not equal to this nodes + * starting index - 1. All such occurences of this condition are + * avoided by moving the setting of the nodes mask bits into + * the previous nodes num_after setting. + */ +static void node_reduce(struct sparsebit *s, struct node *nodep) +{ + bool reduction_performed; + + do { + reduction_performed = false; + struct node *prev, *next, *tmp; + + /* 1) Potential reductions within the current node. */ + + /* Nodes with all bits cleared may be removed. */ + if (nodep->mask == 0 && nodep->num_after == 0) { + /* + * About to remove the node pointed to by + * nodep, which normally would cause a problem + * for the next pass through the reduction loop, + * because the node at the starting point no longer + * exists. This potential problem is handled + * by first remembering the location of the next + * or previous nodes. Doesn't matter which, because + * once the node at nodep is removed, there will be + * no other nodes between prev and next. + * + * Note, the checks performed on nodep against both + * both prev and next both check for an adjacent + * node that can be reduced into a single node. As + * such, after removing the node at nodep, doesn't + * matter whether the nodep for the next pass + * through the loop is equal to the previous pass + * prev or next node. Either way, on the next pass + * the one not selected will become either the + * prev or next node. + */ + tmp = node_next(s, nodep); + if (!tmp) + tmp = node_prev(s, nodep); + + node_rm(s, nodep); + + nodep = tmp; + reduction_performed = true; + continue; + } + + /* + * When the mask is 0, can reduce the amount of num_after + * bits by moving the initial num_after bits into the mask. + */ + if (nodep->mask == 0) { + assert(nodep->num_after != 0); + assert(nodep->idx + MASK_BITS > nodep->idx); + + nodep->idx += MASK_BITS; + + if (nodep->num_after >= MASK_BITS) { + nodep->mask = ~0; + nodep->num_after -= MASK_BITS; + } else { + nodep->mask = (1u << nodep->num_after) - 1; + nodep->num_after = 0; + } + + reduction_performed = true; + continue; + } + + /* + * 2) Potential reductions between the current and + * previous nodes. + */ + prev = node_prev(s, nodep); + if (prev) { + sparsebit_idx_t prev_highest_bit; + + /* Nodes with no bits set can be removed. */ + if (prev->mask == 0 && prev->num_after == 0) { + node_rm(s, prev); + + reduction_performed = true; + continue; + } + + /* + * All mask bits set and previous node has + * adjacent index. + */ + if (nodep->mask + 1 == 0 && + prev->idx + MASK_BITS == nodep->idx) { + prev->num_after += MASK_BITS + nodep->num_after; + nodep->mask = 0; + nodep->num_after = 0; + + reduction_performed = true; + continue; + } + + /* + * Is node adjacent to previous node and the node + * contains a single contiguous range of bits + * starting from the beginning of the mask? + */ + prev_highest_bit = prev->idx + MASK_BITS - 1 + prev->num_after; + if (prev_highest_bit + 1 == nodep->idx && + (nodep->mask | (nodep->mask >> 1)) == nodep->mask) { + /* + * How many contiguous bits are there? + * Is equal to the total number of set + * bits, due to an earlier check that + * there is a single contiguous range of + * set bits. 
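+ * (For example, a mask of 0x1f passes the contiguous-range check
+ * above, __builtin_popcount(0x1f) == 5, and prev->num_after grows
+ * by 5.)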
+ */ + unsigned int num_contiguous + = __builtin_popcount(nodep->mask); + assert((num_contiguous > 0) && + ((1ULL << num_contiguous) - 1) == nodep->mask); + + prev->num_after += num_contiguous; + nodep->mask = 0; + + /* + * For predictable performance, handle special + * case where all mask bits are set and there + * is a non-zero num_after setting. This code + * is functionally correct without the following + * conditionalized statements, but without them + * the value of num_after is only reduced by + * the number of mask bits per pass. There are + * cases where num_after can be close to 2^64. + * Without this code it could take nearly + * (2^64) / 32 passes to perform the full + * reduction. + */ + if (num_contiguous == MASK_BITS) { + prev->num_after += nodep->num_after; + nodep->num_after = 0; + } + + reduction_performed = true; + continue; + } + } + + /* + * 3) Potential reductions between the current and + * next nodes. + */ + next = node_next(s, nodep); + if (next) { + /* Nodes with no bits set can be removed. */ + if (next->mask == 0 && next->num_after == 0) { + node_rm(s, next); + reduction_performed = true; + continue; + } + + /* + * Is next node index adjacent to current node + * and has a mask with all bits set? + */ + if (next->idx == nodep->idx + MASK_BITS + nodep->num_after && + next->mask == ~(mask_t) 0) { + nodep->num_after += MASK_BITS; + next->mask = 0; + nodep->num_after += next->num_after; + next->num_after = 0; + + node_rm(s, next); + next = NULL; + + reduction_performed = true; + continue; + } + } + } while (nodep && reduction_performed); +} + +/* Returns whether the bit at the index given by idx, within the + * sparsebit array is set or not. + */ +bool sparsebit_is_set(struct sparsebit *s, sparsebit_idx_t idx) +{ + struct node *nodep; + + /* Find the node that describes the setting of the bit at idx */ + for (nodep = s->root; nodep; + nodep = nodep->idx > idx ? nodep->left : nodep->right) + if (idx >= nodep->idx && + idx <= nodep->idx + MASK_BITS + nodep->num_after - 1) + goto have_node; + + return false; + +have_node: + /* Bit is set if it is any of the bits described by num_after */ + if (nodep->num_after && idx >= nodep->idx + MASK_BITS) + return true; + + /* Is the corresponding mask bit set */ + assert(idx >= nodep->idx && idx - nodep->idx < MASK_BITS); + return !!(nodep->mask & (1 << (idx - nodep->idx))); +} + +/* Within the sparsebit array pointed to by s, sets the bit + * at the index given by idx. + */ +static void bit_set(struct sparsebit *s, sparsebit_idx_t idx) +{ + struct node *nodep; + + /* Skip bits that are already set */ + if (sparsebit_is_set(s, idx)) + return; + + /* + * Get a node where the bit at idx is described by the mask. + * The node_split will also create a node, if there isn't + * already a node that describes the setting of bit. + */ + nodep = node_split(s, idx & -MASK_BITS); + + /* Set the bit within the nodes mask */ + assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1); + assert(!(nodep->mask & (1 << (idx - nodep->idx)))); + nodep->mask |= 1 << (idx - nodep->idx); + s->num_set++; + + node_reduce(s, nodep); +} + +/* Within the sparsebit array pointed to by s, clears the bit + * at the index given by idx. + */ +static void bit_clear(struct sparsebit *s, sparsebit_idx_t idx) +{ + struct node *nodep; + + /* Skip bits that are already cleared */ + if (!sparsebit_is_set(s, idx)) + return; + + /* Is there a node that describes the setting of this bit? 
*/ + nodep = node_find(s, idx); + if (!nodep) + return; + + /* + * If a num_after bit, split the node, so that the bit is + * part of a node mask. + */ + if (idx >= nodep->idx + MASK_BITS) + nodep = node_split(s, idx & -MASK_BITS); + + /* + * After node_split above, bit at idx should be within the mask. + * Clear that bit. + */ + assert(idx >= nodep->idx && idx <= nodep->idx + MASK_BITS - 1); + assert(nodep->mask & (1 << (idx - nodep->idx))); + nodep->mask &= ~(1 << (idx - nodep->idx)); + assert(s->num_set > 0 || sparsebit_all_set(s)); + s->num_set--; + + node_reduce(s, nodep); +} + +/* Recursively dumps to the FILE stream given by stream the contents + * of the sub-tree of nodes pointed to by nodep. Each line of output + * is prefixed by the number of spaces given by indent. On each + * recursion, the indent amount is increased by 2. This causes nodes + * at each level deeper into the binary search tree to be displayed + * with a greater indent. + */ +static void dump_nodes(FILE *stream, struct node *nodep, + unsigned int indent) +{ + char *node_type; + + /* Dump contents of node */ + if (!nodep->parent) + node_type = "root"; + else if (nodep == nodep->parent->left) + node_type = "left"; + else { + assert(nodep == nodep->parent->right); + node_type = "right"; + } + fprintf(stream, "%*s---- %s nodep: %p\n", indent, "", node_type, nodep); + fprintf(stream, "%*s parent: %p left: %p right: %p\n", indent, "", + nodep->parent, nodep->left, nodep->right); + fprintf(stream, "%*s idx: 0x%lx mask: 0x%x num_after: 0x%lx\n", + indent, "", nodep->idx, nodep->mask, nodep->num_after); + + /* If present, dump contents of left child nodes */ + if (nodep->left) + dump_nodes(stream, nodep->left, indent + 2); + + /* If present, dump contents of right child nodes */ + if (nodep->right) + dump_nodes(stream, nodep->right, indent + 2); +} + +static inline sparsebit_idx_t node_first_set(struct node *nodep, int start) +{ + mask_t leading = (mask_t)1 << start; + int n1 = __builtin_ctz(nodep->mask & -leading); + + return nodep->idx + n1; +} + +static inline sparsebit_idx_t node_first_clear(struct node *nodep, int start) +{ + mask_t leading = (mask_t)1 << start; + int n1 = __builtin_ctz(~nodep->mask & -leading); + + return nodep->idx + n1; +} + +/* Dumps to the FILE stream specified by stream, the implementation dependent + * internal state of s. Each line of output is prefixed with the number + * of spaces given by indent. The output is completely implementation + * dependent and subject to change. Output from this function should only + * be used for diagnostic purposes. For example, this function can be + * used by test cases after they detect an unexpected condition, as a means + * to capture diagnostic information. + */ +static void sparsebit_dump_internal(FILE *stream, struct sparsebit *s, + unsigned int indent) +{ + /* Dump the contents of s */ + fprintf(stream, "%*sroot: %p\n", indent, "", s->root); + fprintf(stream, "%*snum_set: 0x%lx\n", indent, "", s->num_set); + + if (s->root) + dump_nodes(stream, s->root, indent); +} + +/* Allocates and returns a new sparsebit array. The initial state + * of the newly allocated sparsebit array has all bits cleared. + */ +struct sparsebit *sparsebit_alloc(void) +{ + struct sparsebit *s; + + /* Allocate top level structure. */ + s = calloc(1, sizeof(*s)); + if (!s) { + perror("calloc"); + abort(); + } + + return s; +} + +/* Frees the implementation dependent data for the sparsebit array + * pointed to by s and poisons the pointer to that data. 
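+ *
+ * Typical usage (illustrative):
+ *
+ *	struct sparsebit *s = sparsebit_alloc();
+ *
+ *	sparsebit_set_num(s, 0x1000, 16);
+ *	...
+ *	sparsebit_free(&s);	(s is NULL on return)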
+ */ +void sparsebit_free(struct sparsebit **sbitp) +{ + struct sparsebit *s = *sbitp; + + if (!s) + return; + + sparsebit_clear_all(s); + free(s); + *sbitp = NULL; +} + +/* Makes a copy of the sparsebit array given by s, to the sparsebit + * array given by d. Note, d must have already been allocated via + * sparsebit_alloc(). It can though already have bits set, which + * if different from src will be cleared. + */ +void sparsebit_copy(struct sparsebit *d, struct sparsebit *s) +{ + /* First clear any bits already set in the destination */ + sparsebit_clear_all(d); + + if (s->root) { + d->root = node_copy_subtree(s->root); + d->num_set = s->num_set; + } +} + +/* Returns whether num consecutive bits starting at idx are all set. */ +bool sparsebit_is_set_num(struct sparsebit *s, + sparsebit_idx_t idx, sparsebit_num_t num) +{ + sparsebit_idx_t next_cleared; + + assert(num > 0); + assert(idx + num - 1 >= idx); + + /* With num > 0, the first bit must be set. */ + if (!sparsebit_is_set(s, idx)) + return false; + + /* Find the next cleared bit */ + next_cleared = sparsebit_next_clear(s, idx); + + /* + * If no cleared bits beyond idx, then there are at least num + * set bits. idx + num doesn't wrap. Otherwise check if + * there are enough set bits between idx and the next cleared bit. + */ + return next_cleared == 0 || next_cleared - idx >= num; +} + +/* Returns whether the bit at the index given by idx. */ +bool sparsebit_is_clear(struct sparsebit *s, + sparsebit_idx_t idx) +{ + return !sparsebit_is_set(s, idx); +} + +/* Returns whether num consecutive bits starting at idx are all cleared. */ +bool sparsebit_is_clear_num(struct sparsebit *s, + sparsebit_idx_t idx, sparsebit_num_t num) +{ + sparsebit_idx_t next_set; + + assert(num > 0); + assert(idx + num - 1 >= idx); + + /* With num > 0, the first bit must be cleared. */ + if (!sparsebit_is_clear(s, idx)) + return false; + + /* Find the next set bit */ + next_set = sparsebit_next_set(s, idx); + + /* + * If no set bits beyond idx, then there are at least num + * cleared bits. idx + num doesn't wrap. Otherwise check if + * there are enough cleared bits between idx and the next set bit. + */ + return next_set == 0 || next_set - idx >= num; +} + +/* Returns the total number of bits set. Note: 0 is also returned for + * the case of all bits set. This is because with all bits set, there + * is 1 additional bit set beyond what can be represented in the return + * value. Use sparsebit_any_set(), instead of sparsebit_num_set() > 0, + * to determine if the sparsebit array has any bits set. + */ +sparsebit_num_t sparsebit_num_set(struct sparsebit *s) +{ + return s->num_set; +} + +/* Returns whether any bit is set in the sparsebit array. */ +bool sparsebit_any_set(struct sparsebit *s) +{ + /* + * Nodes only describe set bits. If any nodes then there + * is at least 1 bit set. + */ + if (!s->root) + return false; + + /* + * Every node should have a non-zero mask. For now will + * just assure that the root node has a non-zero mask, + * which is a quick check that at least 1 bit is set. + */ + assert(s->root->mask != 0); + assert(s->num_set > 0 || + (s->root->num_after == ((sparsebit_num_t) 0) - MASK_BITS && + s->root->mask == ~(mask_t) 0)); + + return true; +} + +/* Returns whether all the bits in the sparsebit array are cleared. */ +bool sparsebit_all_clear(struct sparsebit *s) +{ + return !sparsebit_any_set(s); +} + +/* Returns whether all the bits in the sparsebit array are set. 
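+ * sparsebit_any_clear() below is the negation of sparsebit_all_set():
+ * it returns true when at least one bit in the array is clear.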
*/ +bool sparsebit_any_clear(struct sparsebit *s) +{ + return !sparsebit_all_set(s); +} + +/* Returns the index of the first set bit. Abort if no bits are set. + */ +sparsebit_idx_t sparsebit_first_set(struct sparsebit *s) +{ + struct node *nodep; + + /* Validate at least 1 bit is set */ + assert(sparsebit_any_set(s)); + + nodep = node_first(s); + return node_first_set(nodep, 0); +} + +/* Returns the index of the first cleared bit. Abort if + * no bits are cleared. + */ +sparsebit_idx_t sparsebit_first_clear(struct sparsebit *s) +{ + struct node *nodep1, *nodep2; + + /* Validate at least 1 bit is cleared. */ + assert(sparsebit_any_clear(s)); + + /* If no nodes or first node index > 0 then lowest cleared is 0 */ + nodep1 = node_first(s); + if (!nodep1 || nodep1->idx > 0) + return 0; + + /* Does the mask in the first node contain any cleared bits. */ + if (nodep1->mask != ~(mask_t) 0) + return node_first_clear(nodep1, 0); + + /* + * All mask bits set in first node. If there isn't a second node + * then the first cleared bit is the first bit after the bits + * described by the first node. + */ + nodep2 = node_next(s, nodep1); + if (!nodep2) { + /* + * No second node. First cleared bit is first bit beyond + * bits described by first node. + */ + assert(nodep1->mask == ~(mask_t) 0); + assert(nodep1->idx + MASK_BITS + nodep1->num_after != (sparsebit_idx_t) 0); + return nodep1->idx + MASK_BITS + nodep1->num_after; + } + + /* + * There is a second node. + * If it is not adjacent to the first node, then there is a gap + * of cleared bits between the nodes, and the first cleared bit + * is the first bit within the gap. + */ + if (nodep1->idx + MASK_BITS + nodep1->num_after != nodep2->idx) + return nodep1->idx + MASK_BITS + nodep1->num_after; + + /* + * Second node is adjacent to the first node. + * Because it is adjacent, its mask should be non-zero. If all + * its mask bits are set, then with it being adjacent, it should + * have had the mask bits moved into the num_after setting of the + * previous node. + */ + return node_first_clear(nodep2, 0); +} + +/* Returns index of next bit set within s after the index given by prev. + * Returns 0 if there are no bits after prev that are set. + */ +sparsebit_idx_t sparsebit_next_set(struct sparsebit *s, + sparsebit_idx_t prev) +{ + sparsebit_idx_t lowest_possible = prev + 1; + sparsebit_idx_t start; + struct node *nodep; + + /* A bit after the highest index can't be set. */ + if (lowest_possible == 0) + return 0; + + /* + * Find the leftmost 'candidate' overlapping or to the right + * of lowest_possible. + */ + struct node *candidate = NULL; + + /* True iff lowest_possible is within candidate */ + bool contains = false; + + /* + * Find node that describes setting of bit at lowest_possible. + * If such a node doesn't exist, find the node with the lowest + * starting index that is > lowest_possible. + */ + for (nodep = s->root; nodep;) { + if ((nodep->idx + MASK_BITS + nodep->num_after - 1) + >= lowest_possible) { + candidate = nodep; + if (candidate->idx <= lowest_possible) { + contains = true; + break; + } + nodep = nodep->left; + } else { + nodep = nodep->right; + } + } + if (!candidate) + return 0; + + assert(candidate->mask != 0); + + /* Does the candidate node describe the setting of lowest_possible? */ + if (!contains) { + /* + * Candidate doesn't describe setting of bit at lowest_possible. + * Candidate points to the first node with a starting index + * > lowest_possible. 
+ */ + assert(candidate->idx > lowest_possible); + + return node_first_set(candidate, 0); + } + + /* + * Candidate describes setting of bit at lowest_possible. + * Note: although the node describes the setting of the bit + * at lowest_possible, its possible that its setting and the + * setting of all latter bits described by this node are 0. + * For now, just handle the cases where this node describes + * a bit at or after an index of lowest_possible that is set. + */ + start = lowest_possible - candidate->idx; + + if (start < MASK_BITS && candidate->mask >= (1 << start)) + return node_first_set(candidate, start); + + if (candidate->num_after) { + sparsebit_idx_t first_num_after_idx = candidate->idx + MASK_BITS; + + return lowest_possible < first_num_after_idx + ? first_num_after_idx : lowest_possible; + } + + /* + * Although candidate node describes setting of bit at + * the index of lowest_possible, all bits at that index and + * latter that are described by candidate are cleared. With + * this, the next bit is the first bit in the next node, if + * such a node exists. If a next node doesn't exist, then + * there is no next set bit. + */ + candidate = node_next(s, candidate); + if (!candidate) + return 0; + + return node_first_set(candidate, 0); +} + +/* Returns index of next bit cleared within s after the index given by prev. + * Returns 0 if there are no bits after prev that are cleared. + */ +sparsebit_idx_t sparsebit_next_clear(struct sparsebit *s, + sparsebit_idx_t prev) +{ + sparsebit_idx_t lowest_possible = prev + 1; + sparsebit_idx_t idx; + struct node *nodep1, *nodep2; + + /* A bit after the highest index can't be set. */ + if (lowest_possible == 0) + return 0; + + /* + * Does a node describing the setting of lowest_possible exist? + * If not, the bit at lowest_possible is cleared. + */ + nodep1 = node_find(s, lowest_possible); + if (!nodep1) + return lowest_possible; + + /* Does a mask bit in node 1 describe the next cleared bit. */ + for (idx = lowest_possible - nodep1->idx; idx < MASK_BITS; idx++) + if (!(nodep1->mask & (1 << idx))) + return nodep1->idx + idx; + + /* + * Next cleared bit is not described by node 1. If there + * isn't a next node, then next cleared bit is described + * by bit after the bits described by the first node. + */ + nodep2 = node_next(s, nodep1); + if (!nodep2) + return nodep1->idx + MASK_BITS + nodep1->num_after; + + /* + * There is a second node. + * If it is not adjacent to the first node, then there is a gap + * of cleared bits between the nodes, and the next cleared bit + * is the first bit within the gap. + */ + if (nodep1->idx + MASK_BITS + nodep1->num_after != nodep2->idx) + return nodep1->idx + MASK_BITS + nodep1->num_after; + + /* + * Second node is adjacent to the first node. + * Because it is adjacent, its mask should be non-zero. If all + * its mask bits are set, then with it being adjacent, it should + * have had the mask bits moved into the num_after setting of the + * previous node. + */ + return node_first_clear(nodep2, 0); +} + +/* Starting with the index 1 greater than the index given by start, finds + * and returns the index of the first sequence of num consecutively set + * bits. Returns a value of 0 of no such sequence exists. 
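+ *
+ * For example (illustrative): with bits 0x10:0x12 and 0x20:0x2f set,
+ * sparsebit_next_set_num(s, 0, 8) returns 0x20, because the run of set
+ * bits starting at 0x10 is only 3 bits long.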
+ */ +sparsebit_idx_t sparsebit_next_set_num(struct sparsebit *s, + sparsebit_idx_t start, sparsebit_num_t num) +{ + sparsebit_idx_t idx; + + assert(num >= 1); + + for (idx = sparsebit_next_set(s, start); + idx != 0 && idx + num - 1 >= idx; + idx = sparsebit_next_set(s, idx)) { + assert(sparsebit_is_set(s, idx)); + + /* + * Does the sequence of bits starting at idx consist of + * num set bits? + */ + if (sparsebit_is_set_num(s, idx, num)) + return idx; + + /* + * Sequence of set bits at idx isn't large enough. + * Skip this entire sequence of set bits. + */ + idx = sparsebit_next_clear(s, idx); + if (idx == 0) + return 0; + } + + return 0; +} + +/* Starting with the index 1 greater than the index given by start, finds + * and returns the index of the first sequence of num consecutively cleared + * bits. Returns a value of 0 of no such sequence exists. + */ +sparsebit_idx_t sparsebit_next_clear_num(struct sparsebit *s, + sparsebit_idx_t start, sparsebit_num_t num) +{ + sparsebit_idx_t idx; + + assert(num >= 1); + + for (idx = sparsebit_next_clear(s, start); + idx != 0 && idx + num - 1 >= idx; + idx = sparsebit_next_clear(s, idx)) { + assert(sparsebit_is_clear(s, idx)); + + /* + * Does the sequence of bits starting at idx consist of + * num cleared bits? + */ + if (sparsebit_is_clear_num(s, idx, num)) + return idx; + + /* + * Sequence of cleared bits at idx isn't large enough. + * Skip this entire sequence of cleared bits. + */ + idx = sparsebit_next_set(s, idx); + if (idx == 0) + return 0; + } + + return 0; +} + +/* Sets the bits * in the inclusive range idx through idx + num - 1. */ +void sparsebit_set_num(struct sparsebit *s, + sparsebit_idx_t start, sparsebit_num_t num) +{ + struct node *nodep, *next; + unsigned int n1; + sparsebit_idx_t idx; + sparsebit_num_t n; + sparsebit_idx_t middle_start, middle_end; + + assert(num > 0); + assert(start + num - 1 >= start); + + /* + * Leading - bits before first mask boundary. + * + * TODO(lhuemill): With some effort it may be possible to + * replace the following loop with a sequential sequence + * of statements. High level sequence would be: + * + * 1. Use node_split() to force node that describes setting + * of idx to be within the mask portion of a node. + * 2. Form mask of bits to be set. + * 3. Determine number of mask bits already set in the node + * and store in a local variable named num_already_set. + * 4. Set the appropriate mask bits within the node. + * 5. Increment struct sparsebit_pvt num_set member + * by the number of bits that were actually set. + * Exclude from the counts bits that were already set. + * 6. Before returning to the caller, use node_reduce() to + * handle the multiple corner cases that this method + * introduces. + */ + for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--) + bit_set(s, idx); + + /* Middle - bits spanning one or more entire mask */ + middle_start = idx; + middle_end = middle_start + (n & -MASK_BITS) - 1; + if (n >= MASK_BITS) { + nodep = node_split(s, middle_start); + + /* + * As needed, split just after end of middle bits. + * No split needed if end of middle bits is at highest + * supported bit index. + */ + if (middle_end + 1 > middle_end) + (void) node_split(s, middle_end + 1); + + /* Delete nodes that only describe bits within the middle. 
*/ + for (next = node_next(s, nodep); + next && (next->idx < middle_end); + next = node_next(s, nodep)) { + assert(next->idx + MASK_BITS + next->num_after - 1 <= middle_end); + node_rm(s, next); + next = NULL; + } + + /* As needed set each of the mask bits */ + for (n1 = 0; n1 < MASK_BITS; n1++) { + if (!(nodep->mask & (1 << n1))) { + nodep->mask |= 1 << n1; + s->num_set++; + } + } + + s->num_set -= nodep->num_after; + nodep->num_after = middle_end - middle_start + 1 - MASK_BITS; + s->num_set += nodep->num_after; + + node_reduce(s, nodep); + } + idx = middle_end + 1; + n -= middle_end - middle_start + 1; + + /* Trailing - bits at and beyond last mask boundary */ + assert(n < MASK_BITS); + for (; n > 0; idx++, n--) + bit_set(s, idx); +} + +/* Clears the bits * in the inclusive range idx through idx + num - 1. */ +void sparsebit_clear_num(struct sparsebit *s, + sparsebit_idx_t start, sparsebit_num_t num) +{ + struct node *nodep, *next; + unsigned int n1; + sparsebit_idx_t idx; + sparsebit_num_t n; + sparsebit_idx_t middle_start, middle_end; + + assert(num > 0); + assert(start + num - 1 >= start); + + /* Leading - bits before first mask boundary */ + for (idx = start, n = num; n > 0 && idx % MASK_BITS != 0; idx++, n--) + bit_clear(s, idx); + + /* Middle - bits spanning one or more entire mask */ + middle_start = idx; + middle_end = middle_start + (n & -MASK_BITS) - 1; + if (n >= MASK_BITS) { + nodep = node_split(s, middle_start); + + /* + * As needed, split just after end of middle bits. + * No split needed if end of middle bits is at highest + * supported bit index. + */ + if (middle_end + 1 > middle_end) + (void) node_split(s, middle_end + 1); + + /* Delete nodes that only describe bits within the middle. */ + for (next = node_next(s, nodep); + next && (next->idx < middle_end); + next = node_next(s, nodep)) { + assert(next->idx + MASK_BITS + next->num_after - 1 <= middle_end); + node_rm(s, next); + next = NULL; + } + + /* As needed clear each of the mask bits */ + for (n1 = 0; n1 < MASK_BITS; n1++) { + if (nodep->mask & (1 << n1)) { + nodep->mask &= ~(1 << n1); + s->num_set--; + } + } + + /* Clear any bits described by num_after */ + s->num_set -= nodep->num_after; + nodep->num_after = 0; + + /* + * Delete the node that describes the beginning of + * the middle bits and perform any allowed reductions + * with the nodes prev or next of nodep. + */ + node_reduce(s, nodep); + nodep = NULL; + } + idx = middle_end + 1; + n -= middle_end - middle_start + 1; + + /* Trailing - bits at and beyond last mask boundary */ + assert(n < MASK_BITS); + for (; n > 0; idx++, n--) + bit_clear(s, idx); +} + +/* Sets the bit at the index given by idx. */ +void sparsebit_set(struct sparsebit *s, sparsebit_idx_t idx) +{ + sparsebit_set_num(s, idx, 1); +} + +/* Clears the bit at the index given by idx. */ +void sparsebit_clear(struct sparsebit *s, sparsebit_idx_t idx) +{ + sparsebit_clear_num(s, idx, 1); +} + +/* Sets the bits in the entire addressable range of the sparsebit array. */ +void sparsebit_set_all(struct sparsebit *s) +{ + sparsebit_set(s, 0); + sparsebit_set_num(s, 1, ~(sparsebit_idx_t) 0); + assert(sparsebit_all_set(s)); +} + +/* Clears the bits in the entire addressable range of the sparsebit array. 
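+ * As with sparsebit_set_all() above, this is done in two steps (bit 0,
+ * then the remaining bits) because a sparsebit_num_t count cannot
+ * represent all 2^64 bits in a single call.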
*/ +void sparsebit_clear_all(struct sparsebit *s) +{ + sparsebit_clear(s, 0); + sparsebit_clear_num(s, 1, ~(sparsebit_idx_t) 0); + assert(!sparsebit_any_set(s)); +} + +static size_t display_range(FILE *stream, sparsebit_idx_t low, + sparsebit_idx_t high, bool prepend_comma_space) +{ + char *fmt_str; + size_t sz; + + /* Determine the printf format string */ + if (low == high) + fmt_str = prepend_comma_space ? ", 0x%lx" : "0x%lx"; + else + fmt_str = prepend_comma_space ? ", 0x%lx:0x%lx" : "0x%lx:0x%lx"; + + /* + * When stream is NULL, just determine the size of what would + * have been printed, else print the range. + */ + if (!stream) + sz = snprintf(NULL, 0, fmt_str, low, high); + else + sz = fprintf(stream, fmt_str, low, high); + + return sz; +} + + +/* Dumps to the FILE stream given by stream, the bit settings + * of s. Each line of output is prefixed with the number of + * spaces given by indent. The length of each line is implementation + * dependent and does not depend on the indent amount. The following + * is an example output of a sparsebit array that has bits: + * + * 0x5, 0x8, 0xa:0xe, 0x12 + * + * This corresponds to a sparsebit whose bits 5, 8, 10, 11, 12, 13, 14, 18 + * are set. Note that a ':', instead of a '-' is used to specify a range of + * contiguous bits. This is done because '-' is used to specify command-line + * options, and sometimes ranges are specified as command-line arguments. + */ +void sparsebit_dump(FILE *stream, struct sparsebit *s, + unsigned int indent) +{ + size_t current_line_len = 0; + size_t sz; + struct node *nodep; + + if (!sparsebit_any_set(s)) + return; + + /* Display initial indent */ + fprintf(stream, "%*s", indent, ""); + + /* For each node */ + for (nodep = node_first(s); nodep; nodep = node_next(s, nodep)) { + unsigned int n1; + sparsebit_idx_t low, high; + + /* For each group of bits in the mask */ + for (n1 = 0; n1 < MASK_BITS; n1++) { + if (nodep->mask & (1 << n1)) { + low = high = nodep->idx + n1; + + for (; n1 < MASK_BITS; n1++) { + if (nodep->mask & (1 << n1)) + high = nodep->idx + n1; + else + break; + } + + if ((n1 == MASK_BITS) && nodep->num_after) + high += nodep->num_after; + + /* + * How much room will it take to display + * this range. + */ + sz = display_range(NULL, low, high, + current_line_len != 0); + + /* + * If there is not enough room, display + * a newline plus the indent of the next + * line. + */ + if (current_line_len + sz > DUMP_LINE_MAX) { + fputs("\n", stream); + fprintf(stream, "%*s", indent, ""); + current_line_len = 0; + } + + /* Display the range */ + sz = display_range(stream, low, high, + current_line_len != 0); + current_line_len += sz; + } + } + + /* + * If num_after and most significant-bit of mask is not + * set, then still need to display a range for the bits + * described by num_after. + */ + if (!(nodep->mask & (1 << (MASK_BITS - 1))) && nodep->num_after) { + low = nodep->idx + MASK_BITS; + high = nodep->idx + MASK_BITS + nodep->num_after - 1; + + /* + * How much room will it take to display + * this range. + */ + sz = display_range(NULL, low, high, + current_line_len != 0); + + /* + * If there is not enough room, display + * a newline plus the indent of the next + * line. 
+ */ + if (current_line_len + sz > DUMP_LINE_MAX) { + fputs("\n", stream); + fprintf(stream, "%*s", indent, ""); + current_line_len = 0; + } + + /* Display the range */ + sz = display_range(stream, low, high, + current_line_len != 0); + current_line_len += sz; + } + } + fputs("\n", stream); +} + +/* Validates the internal state of the sparsebit array given by + * s. On error, diagnostic information is printed to stderr and + * abort is called. + */ +void sparsebit_validate_internal(struct sparsebit *s) +{ + bool error_detected = false; + struct node *nodep, *prev = NULL; + sparsebit_num_t total_bits_set = 0; + unsigned int n1; + + /* For each node */ + for (nodep = node_first(s); nodep; + prev = nodep, nodep = node_next(s, nodep)) { + + /* + * Increase total bits set by the number of bits set + * in this node. + */ + for (n1 = 0; n1 < MASK_BITS; n1++) + if (nodep->mask & (1 << n1)) + total_bits_set++; + + total_bits_set += nodep->num_after; + + /* + * Arbitrary choice as to whether a mask of 0 is allowed + * or not. For diagnostic purposes it is beneficial to + * have only one valid means to represent a set of bits. + * To support this an arbitrary choice has been made + * to not allow a mask of zero. + */ + if (nodep->mask == 0) { + fprintf(stderr, "Node mask of zero, " + "nodep: %p nodep->mask: 0x%x", + nodep, nodep->mask); + error_detected = true; + break; + } + + /* + * Validate num_after is not greater than the max index + * - the number of mask bits. The num_after member + * uses 0-based indexing and thus has no value that + * represents all bits set. This limitation is handled + * by requiring a non-zero mask. With a non-zero mask, + * MASK_BITS worth of bits are described by the mask, + * which makes the largest needed num_after equal to: + * + * (~(sparsebit_num_t) 0) - MASK_BITS + 1 + */ + if (nodep->num_after + > (~(sparsebit_num_t) 0) - MASK_BITS + 1) { + fprintf(stderr, "num_after too large, " + "nodep: %p nodep->num_after: 0x%lx", + nodep, nodep->num_after); + error_detected = true; + break; + } + + /* Validate node index is divisible by the mask size */ + if (nodep->idx % MASK_BITS) { + fprintf(stderr, "Node index not divisible by " + "mask size,\n" + " nodep: %p nodep->idx: 0x%lx " + "MASK_BITS: %lu\n", + nodep, nodep->idx, MASK_BITS); + error_detected = true; + break; + } + + /* + * Validate bits described by node don't wrap beyond the + * highest supported index. + */ + if ((nodep->idx + MASK_BITS + nodep->num_after - 1) < nodep->idx) { + fprintf(stderr, "Bits described by node wrap " + "beyond highest supported index,\n" + " nodep: %p nodep->idx: 0x%lx\n" + " MASK_BITS: %lu nodep->num_after: 0x%lx", + nodep, nodep->idx, MASK_BITS, nodep->num_after); + error_detected = true; + break; + } + + /* Check parent pointers. 
*/ + if (nodep->left) { + if (nodep->left->parent != nodep) { + fprintf(stderr, "Left child parent pointer " + "doesn't point to this node,\n" + " nodep: %p nodep->left: %p " + "nodep->left->parent: %p", + nodep, nodep->left, + nodep->left->parent); + error_detected = true; + break; + } + } + + if (nodep->right) { + if (nodep->right->parent != nodep) { + fprintf(stderr, "Right child parent pointer " + "doesn't point to this node,\n" + " nodep: %p nodep->right: %p " + "nodep->right->parent: %p", + nodep, nodep->right, + nodep->right->parent); + error_detected = true; + break; + } + } + + if (!nodep->parent) { + if (s->root != nodep) { + fprintf(stderr, "Unexpected root node, " + "s->root: %p nodep: %p", + s->root, nodep); + error_detected = true; + break; + } + } + + if (prev) { + /* + * Is index of previous node before index of + * current node? + */ + if (prev->idx >= nodep->idx) { + fprintf(stderr, "Previous node index " + ">= current node index,\n" + " prev: %p prev->idx: 0x%lx\n" + " nodep: %p nodep->idx: 0x%lx", + prev, prev->idx, nodep, nodep->idx); + error_detected = true; + break; + } + + /* + * Nodes occur in asscending order, based on each + * nodes starting index. + */ + if ((prev->idx + MASK_BITS + prev->num_after - 1) + >= nodep->idx) { + fprintf(stderr, "Previous node bit range " + "overlap with current node bit range,\n" + " prev: %p prev->idx: 0x%lx " + "prev->num_after: 0x%lx\n" + " nodep: %p nodep->idx: 0x%lx " + "nodep->num_after: 0x%lx\n" + " MASK_BITS: %lu", + prev, prev->idx, prev->num_after, + nodep, nodep->idx, nodep->num_after, + MASK_BITS); + error_detected = true; + break; + } + + /* + * When the node has all mask bits set, it shouldn't + * be adjacent to the last bit described by the + * previous node. + */ + if (nodep->mask == ~(mask_t) 0 && + prev->idx + MASK_BITS + prev->num_after == nodep->idx) { + fprintf(stderr, "Current node has mask with " + "all bits set and is adjacent to the " + "previous node,\n" + " prev: %p prev->idx: 0x%lx " + "prev->num_after: 0x%lx\n" + " nodep: %p nodep->idx: 0x%lx " + "nodep->num_after: 0x%lx\n" + " MASK_BITS: %lu", + prev, prev->idx, prev->num_after, + nodep, nodep->idx, nodep->num_after, + MASK_BITS); + + error_detected = true; + break; + } + } + } + + if (!error_detected) { + /* + * Is sum of bits set in each node equal to the count + * of total bits set. + */ + if (s->num_set != total_bits_set) { + fprintf(stderr, "Number of bits set mismatch,\n" + " s->num_set: 0x%lx total_bits_set: 0x%lx", + s->num_set, total_bits_set); + + error_detected = true; + } + } + + if (error_detected) { + fputs(" dump_internal:\n", stderr); + sparsebit_dump_internal(stderr, s, 4); + abort(); + } +} + + +#ifdef FUZZ +/* A simple but effective fuzzing driver. Look for bugs with the help + * of some invariants and of a trivial representation of sparsebit. + * Just use 512 bytes of /dev/zero and /dev/urandom as inputs, and let + * afl-fuzz do the magic. 
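+ * One possible standalone build (the include path is illustrative and
+ * depends on the local tree layout):
+ *
+ *	afl-gcc -DFUZZ -I<kvm selftests include dir> sparsebit.c -o sparsebit-fuzz
+ *	afl-fuzz -i in -o out ./sparsebit-fuzz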
:) + */ + +#include <stdlib.h> + +struct range { + sparsebit_idx_t first, last; + bool set; +}; + +struct sparsebit *s; +struct range ranges[1000]; +int num_ranges; + +static bool get_value(sparsebit_idx_t idx) +{ + int i; + + for (i = num_ranges; --i >= 0; ) + if (ranges[i].first <= idx && idx <= ranges[i].last) + return ranges[i].set; + + return false; +} + +static void operate(int code, sparsebit_idx_t first, sparsebit_idx_t last) +{ + sparsebit_num_t num; + sparsebit_idx_t next; + + if (first < last) { + num = last - first + 1; + } else { + num = first - last + 1; + first = last; + last = first + num - 1; + } + + switch (code) { + case 0: + sparsebit_set(s, first); + assert(sparsebit_is_set(s, first)); + assert(!sparsebit_is_clear(s, first)); + assert(sparsebit_any_set(s)); + assert(!sparsebit_all_clear(s)); + if (get_value(first)) + return; + if (num_ranges == 1000) + exit(0); + ranges[num_ranges++] = (struct range) + { .first = first, .last = first, .set = true }; + break; + case 1: + sparsebit_clear(s, first); + assert(!sparsebit_is_set(s, first)); + assert(sparsebit_is_clear(s, first)); + assert(sparsebit_any_clear(s)); + assert(!sparsebit_all_set(s)); + if (!get_value(first)) + return; + if (num_ranges == 1000) + exit(0); + ranges[num_ranges++] = (struct range) + { .first = first, .last = first, .set = false }; + break; + case 2: + assert(sparsebit_is_set(s, first) == get_value(first)); + assert(sparsebit_is_clear(s, first) == !get_value(first)); + break; + case 3: + if (sparsebit_any_set(s)) + assert(get_value(sparsebit_first_set(s))); + if (sparsebit_any_clear(s)) + assert(!get_value(sparsebit_first_clear(s))); + sparsebit_set_all(s); + assert(!sparsebit_any_clear(s)); + assert(sparsebit_all_set(s)); + num_ranges = 0; + ranges[num_ranges++] = (struct range) + { .first = 0, .last = ~(sparsebit_idx_t)0, .set = true }; + break; + case 4: + if (sparsebit_any_set(s)) + assert(get_value(sparsebit_first_set(s))); + if (sparsebit_any_clear(s)) + assert(!get_value(sparsebit_first_clear(s))); + sparsebit_clear_all(s); + assert(!sparsebit_any_set(s)); + assert(sparsebit_all_clear(s)); + num_ranges = 0; + break; + case 5: + next = sparsebit_next_set(s, first); + assert(next == 0 || next > first); + assert(next == 0 || get_value(next)); + break; + case 6: + next = sparsebit_next_clear(s, first); + assert(next == 0 || next > first); + assert(next == 0 || !get_value(next)); + break; + case 7: + next = sparsebit_next_clear(s, first); + if (sparsebit_is_set_num(s, first, num)) { + assert(next == 0 || next > last); + if (first) + next = sparsebit_next_set(s, first - 1); + else if (sparsebit_any_set(s)) + next = sparsebit_first_set(s); + else + return; + assert(next == first); + } else { + assert(sparsebit_is_clear(s, first) || next <= last); + } + break; + case 8: + next = sparsebit_next_set(s, first); + if (sparsebit_is_clear_num(s, first, num)) { + assert(next == 0 || next > last); + if (first) + next = sparsebit_next_clear(s, first - 1); + else if (sparsebit_any_clear(s)) + next = sparsebit_first_clear(s); + else + return; + assert(next == first); + } else { + assert(sparsebit_is_set(s, first) || next <= last); + } + break; + case 9: + sparsebit_set_num(s, first, num); + assert(sparsebit_is_set_num(s, first, num)); + assert(!sparsebit_is_clear_num(s, first, num)); + assert(sparsebit_any_set(s)); + assert(!sparsebit_all_clear(s)); + if (num_ranges == 1000) + exit(0); + ranges[num_ranges++] = (struct range) + { .first = first, .last = last, .set = true }; + break; + case 10: + 
sparsebit_clear_num(s, first, num); + assert(!sparsebit_is_set_num(s, first, num)); + assert(sparsebit_is_clear_num(s, first, num)); + assert(sparsebit_any_clear(s)); + assert(!sparsebit_all_set(s)); + if (num_ranges == 1000) + exit(0); + ranges[num_ranges++] = (struct range) + { .first = first, .last = last, .set = false }; + break; + case 11: + sparsebit_validate_internal(s); + break; + default: + break; + } +} + +unsigned char get8(void) +{ + int ch; + + ch = getchar(); + if (ch == EOF) + exit(0); + return ch; +} + +uint64_t get64(void) +{ + uint64_t x; + + x = get8(); + x = (x << 8) | get8(); + x = (x << 8) | get8(); + x = (x << 8) | get8(); + x = (x << 8) | get8(); + x = (x << 8) | get8(); + x = (x << 8) | get8(); + return (x << 8) | get8(); +} + +int main(void) +{ + s = sparsebit_alloc(); + for (;;) { + uint8_t op = get8() & 0xf; + uint64_t first = get64(); + uint64_t last = get64(); + + operate(op, first, last); + } +} +#endif diff --git a/tools/testing/selftests/kvm/lib/string_override.c b/tools/testing/selftests/kvm/lib/string_override.c new file mode 100644 index 0000000000..5d1c87277c --- /dev/null +++ b/tools/testing/selftests/kvm/lib/string_override.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include <stddef.h> + +/* + * Override the "basic" built-in string helpers so that they can be used in + * guest code. KVM selftests don't support dynamic loading in guest code and + * will jump into the weeds if the compiler decides to insert an out-of-line + * call via the PLT. + */ +int memcmp(const void *cs, const void *ct, size_t count) +{ + const unsigned char *su1, *su2; + int res = 0; + + for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) { + if ((res = *su1 - *su2) != 0) + break; + } + return res; +} + +void *memcpy(void *dest, const void *src, size_t count) +{ + char *tmp = dest; + const char *s = src; + + while (count--) + *tmp++ = *s++; + return dest; +} + +void *memset(void *s, int c, size_t count) +{ + char *xs = s; + + while (count--) + *xs++ = c; + return s; +} + +size_t strnlen(const char *s, size_t count) +{ + const char *sc; + + for (sc = s; count-- && *sc != '\0'; ++sc) + /* nothing */; + return sc - s; +} diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c new file mode 100644 index 0000000000..5d7f28b02d --- /dev/null +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -0,0 +1,394 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tools/testing/selftests/kvm/lib/test_util.c + * + * Copyright (C) 2020, Google LLC. + */ + +#define _GNU_SOURCE +#include <stdio.h> +#include <stdarg.h> +#include <assert.h> +#include <ctype.h> +#include <limits.h> +#include <stdlib.h> +#include <time.h> +#include <sys/stat.h> +#include <sys/syscall.h> +#include <linux/mman.h> +#include "linux/kernel.h" + +#include "test_util.h" + +/* + * Random number generator that is usable from guest code. This is the + * Park-Miller LCG using standard constants. + */ + +struct guest_random_state new_guest_random_state(uint32_t seed) +{ + struct guest_random_state s = {.seed = seed}; + return s; +} + +uint32_t guest_random_u32(struct guest_random_state *state) +{ + state->seed = (uint64_t)state->seed * 48271 % ((uint32_t)(1 << 31) - 1); + return state->seed; +} + +/* + * Parses "[0-9]+[kmgt]?". 
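+ *
+ * For example, "64" yields 64, "64k" yields 64 * 1024 and "2m" yields
+ * 2 * 1024 * 1024.  Suffixes are case-insensitive, and a trailing 'b'
+ * (plain bytes) is also accepted.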
+ */ +size_t parse_size(const char *size) +{ + size_t base; + char *scale; + int shift = 0; + + TEST_ASSERT(size && isdigit(size[0]), "Need at least one digit in '%s'", size); + + base = strtoull(size, &scale, 0); + + TEST_ASSERT(base != ULLONG_MAX, "Overflow parsing size!"); + + switch (tolower(*scale)) { + case 't': + shift = 40; + break; + case 'g': + shift = 30; + break; + case 'm': + shift = 20; + break; + case 'k': + shift = 10; + break; + case 'b': + case '\0': + shift = 0; + break; + default: + TEST_ASSERT(false, "Unknown size letter %c", *scale); + } + + TEST_ASSERT((base << shift) >> shift == base, "Overflow scaling size!"); + + return base << shift; +} + +int64_t timespec_to_ns(struct timespec ts) +{ + return (int64_t)ts.tv_nsec + 1000000000LL * (int64_t)ts.tv_sec; +} + +struct timespec timespec_add_ns(struct timespec ts, int64_t ns) +{ + struct timespec res; + + res.tv_nsec = ts.tv_nsec + ns; + res.tv_sec = ts.tv_sec + res.tv_nsec / 1000000000LL; + res.tv_nsec %= 1000000000LL; + + return res; +} + +struct timespec timespec_add(struct timespec ts1, struct timespec ts2) +{ + int64_t ns1 = timespec_to_ns(ts1); + int64_t ns2 = timespec_to_ns(ts2); + return timespec_add_ns((struct timespec){0}, ns1 + ns2); +} + +struct timespec timespec_sub(struct timespec ts1, struct timespec ts2) +{ + int64_t ns1 = timespec_to_ns(ts1); + int64_t ns2 = timespec_to_ns(ts2); + return timespec_add_ns((struct timespec){0}, ns1 - ns2); +} + +struct timespec timespec_elapsed(struct timespec start) +{ + struct timespec end; + + clock_gettime(CLOCK_MONOTONIC, &end); + return timespec_sub(end, start); +} + +struct timespec timespec_div(struct timespec ts, int divisor) +{ + int64_t ns = timespec_to_ns(ts) / divisor; + + return timespec_add_ns((struct timespec){0}, ns); +} + +void print_skip(const char *fmt, ...) 
+{ + va_list ap; + + assert(fmt); + va_start(ap, fmt); + vprintf(fmt, ap); + va_end(ap); + puts(", skipping test"); +} + +bool thp_configured(void) +{ + int ret; + struct stat statbuf; + + ret = stat("/sys/kernel/mm/transparent_hugepage", &statbuf); + TEST_ASSERT(ret == 0 || (ret == -1 && errno == ENOENT), + "Error in stating /sys/kernel/mm/transparent_hugepage"); + + return ret == 0; +} + +size_t get_trans_hugepagesz(void) +{ + size_t size; + FILE *f; + int ret; + + TEST_ASSERT(thp_configured(), "THP is not configured in host kernel"); + + f = fopen("/sys/kernel/mm/transparent_hugepage/hpage_pmd_size", "r"); + TEST_ASSERT(f != NULL, "Error in opening transparent_hugepage/hpage_pmd_size"); + + ret = fscanf(f, "%ld", &size); + ret = fscanf(f, "%ld", &size); + TEST_ASSERT(ret < 1, "Error reading transparent_hugepage/hpage_pmd_size"); + fclose(f); + + return size; +} + +size_t get_def_hugetlb_pagesz(void) +{ + char buf[64]; + const char *hugepagesize = "Hugepagesize:"; + const char *hugepages_total = "HugePages_Total:"; + FILE *f; + + f = fopen("/proc/meminfo", "r"); + TEST_ASSERT(f != NULL, "Error in opening /proc/meminfo"); + + while (fgets(buf, sizeof(buf), f) != NULL) { + if (strstr(buf, hugepages_total) == buf) { + unsigned long long total = strtoull(buf + strlen(hugepages_total), NULL, 10); + if (!total) { + fprintf(stderr, "HUGETLB is not enabled in /proc/sys/vm/nr_hugepages\n"); + exit(KSFT_SKIP); + } + } + if (strstr(buf, hugepagesize) == buf) { + fclose(f); + return strtoull(buf + strlen(hugepagesize), NULL, 10) << 10; + } + } + + if (feof(f)) { + fprintf(stderr, "HUGETLB is not configured in host kernel"); + exit(KSFT_SKIP); + } + + TEST_FAIL("Error in reading /proc/meminfo"); +} + +#define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) +#define ANON_HUGE_FLAGS (ANON_FLAGS | MAP_HUGETLB) + +const struct vm_mem_backing_src_alias *vm_mem_backing_src_alias(uint32_t i) +{ + static const struct vm_mem_backing_src_alias aliases[] = { + [VM_MEM_SRC_ANONYMOUS] = { + .name = "anonymous", + .flag = ANON_FLAGS, + }, + [VM_MEM_SRC_ANONYMOUS_THP] = { + .name = "anonymous_thp", + .flag = ANON_FLAGS, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB] = { + .name = "anonymous_hugetlb", + .flag = ANON_HUGE_FLAGS, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_16KB] = { + .name = "anonymous_hugetlb_16kb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_16KB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_64KB] = { + .name = "anonymous_hugetlb_64kb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_64KB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_512KB] = { + .name = "anonymous_hugetlb_512kb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_512KB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_1MB] = { + .name = "anonymous_hugetlb_1mb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_1MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_2MB] = { + .name = "anonymous_hugetlb_2mb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_2MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_8MB] = { + .name = "anonymous_hugetlb_8mb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_8MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_16MB] = { + .name = "anonymous_hugetlb_16mb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_16MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_32MB] = { + .name = "anonymous_hugetlb_32mb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_32MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_256MB] = { + .name = "anonymous_hugetlb_256mb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_256MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_512MB] = { + .name = "anonymous_hugetlb_512mb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_512MB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_1GB] = { + .name = 
"anonymous_hugetlb_1gb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_1GB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_2GB] = { + .name = "anonymous_hugetlb_2gb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_2GB, + }, + [VM_MEM_SRC_ANONYMOUS_HUGETLB_16GB] = { + .name = "anonymous_hugetlb_16gb", + .flag = ANON_HUGE_FLAGS | MAP_HUGE_16GB, + }, + [VM_MEM_SRC_SHMEM] = { + .name = "shmem", + .flag = MAP_SHARED, + }, + [VM_MEM_SRC_SHARED_HUGETLB] = { + .name = "shared_hugetlb", + /* + * No MAP_HUGETLB, we use MFD_HUGETLB instead. Since + * we're using "file backed" memory, we need to specify + * this when the FD is created, not when the area is + * mapped. + */ + .flag = MAP_SHARED, + }, + }; + _Static_assert(ARRAY_SIZE(aliases) == NUM_SRC_TYPES, + "Missing new backing src types?"); + + TEST_ASSERT(i < NUM_SRC_TYPES, "Backing src type ID %d too big", i); + + return &aliases[i]; +} + +#define MAP_HUGE_PAGE_SIZE(x) (1ULL << ((x >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK)) + +size_t get_backing_src_pagesz(uint32_t i) +{ + uint32_t flag = vm_mem_backing_src_alias(i)->flag; + + switch (i) { + case VM_MEM_SRC_ANONYMOUS: + case VM_MEM_SRC_SHMEM: + return getpagesize(); + case VM_MEM_SRC_ANONYMOUS_THP: + return get_trans_hugepagesz(); + case VM_MEM_SRC_ANONYMOUS_HUGETLB: + case VM_MEM_SRC_SHARED_HUGETLB: + return get_def_hugetlb_pagesz(); + default: + return MAP_HUGE_PAGE_SIZE(flag); + } +} + +bool is_backing_src_hugetlb(uint32_t i) +{ + return !!(vm_mem_backing_src_alias(i)->flag & MAP_HUGETLB); +} + +static void print_available_backing_src_types(const char *prefix) +{ + int i; + + printf("%sAvailable backing src types:\n", prefix); + + for (i = 0; i < NUM_SRC_TYPES; i++) + printf("%s %s\n", prefix, vm_mem_backing_src_alias(i)->name); +} + +void backing_src_help(const char *flag) +{ + printf(" %s: specify the type of memory that should be used to\n" + " back the guest data region. (default: %s)\n", + flag, vm_mem_backing_src_alias(DEFAULT_VM_MEM_SRC)->name); + print_available_backing_src_types(" "); +} + +enum vm_mem_backing_src_type parse_backing_src_type(const char *type_name) +{ + int i; + + for (i = 0; i < NUM_SRC_TYPES; i++) + if (!strcmp(type_name, vm_mem_backing_src_alias(i)->name)) + return i; + + print_available_backing_src_types(""); + TEST_FAIL("Unknown backing src type: %s", type_name); + return -1; +} + +long get_run_delay(void) +{ + char path[64]; + long val[2]; + FILE *fp; + + sprintf(path, "/proc/%ld/schedstat", syscall(SYS_gettid)); + fp = fopen(path, "r"); + /* Return MIN_RUN_DELAY_NS upon failure just to be safe */ + if (fscanf(fp, "%ld %ld ", &val[0], &val[1]) < 2) + val[1] = MIN_RUN_DELAY_NS; + fclose(fp); + + return val[1]; +} + +int atoi_paranoid(const char *num_str) +{ + char *end_ptr; + long num; + + errno = 0; + num = strtol(num_str, &end_ptr, 0); + TEST_ASSERT(!errno, "strtol(\"%s\") failed", num_str); + TEST_ASSERT(num_str != end_ptr, + "strtol(\"%s\") didn't find a valid integer.", num_str); + TEST_ASSERT(*end_ptr == '\0', + "strtol(\"%s\") failed to parse trailing characters \"%s\".", + num_str, end_ptr); + TEST_ASSERT(num >= INT_MIN && num <= INT_MAX, + "%ld not in range of [%d, %d]", num, INT_MIN, INT_MAX); + + return num; +} + +char *strdup_printf(const char *fmt, ...) 
+{ + va_list ap; + char *str; + + va_start(ap, fmt); + TEST_ASSERT(vasprintf(&str, fmt, ap) >= 0, "vasprintf() failed"); + va_end(ap); + + return str; +} diff --git a/tools/testing/selftests/kvm/lib/ucall_common.c b/tools/testing/selftests/kvm/lib/ucall_common.c new file mode 100644 index 0000000000..816a3fa109 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/ucall_common.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include "kvm_util.h" +#include "linux/types.h" +#include "linux/bitmap.h" +#include "linux/atomic.h" + +#define GUEST_UCALL_FAILED -1 + +struct ucall_header { + DECLARE_BITMAP(in_use, KVM_MAX_VCPUS); + struct ucall ucalls[KVM_MAX_VCPUS]; +}; + +int ucall_nr_pages_required(uint64_t page_size) +{ + return align_up(sizeof(struct ucall_header), page_size) / page_size; +} + +/* + * ucall_pool holds per-VM values (global data is duplicated by each VM), it + * must not be accessed from host code. + */ +static struct ucall_header *ucall_pool; + +void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa) +{ + struct ucall_header *hdr; + struct ucall *uc; + vm_vaddr_t vaddr; + int i; + + vaddr = __vm_vaddr_alloc(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR, MEM_REGION_DATA); + hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr); + memset(hdr, 0, sizeof(*hdr)); + + for (i = 0; i < KVM_MAX_VCPUS; ++i) { + uc = &hdr->ucalls[i]; + uc->hva = uc; + } + + write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr); + + ucall_arch_init(vm, mmio_gpa); +} + +static struct ucall *ucall_alloc(void) +{ + struct ucall *uc; + int i; + + if (!ucall_pool) + goto ucall_failed; + + for (i = 0; i < KVM_MAX_VCPUS; ++i) { + if (!test_and_set_bit(i, ucall_pool->in_use)) { + uc = &ucall_pool->ucalls[i]; + memset(uc->args, 0, sizeof(uc->args)); + return uc; + } + } + +ucall_failed: + /* + * If the vCPU cannot grab a ucall structure, make a bare ucall with a + * magic value to signal to get_ucall() that things went sideways. + * GUEST_ASSERT() depends on ucall_alloc() and so cannot be used here. + */ + ucall_arch_do_ucall(GUEST_UCALL_FAILED); + return NULL; +} + +static void ucall_free(struct ucall *uc) +{ + /* Beware, here be pointer arithmetic. */ + clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use); +} + +void ucall_assert(uint64_t cmd, const char *exp, const char *file, + unsigned int line, const char *fmt, ...) +{ + struct ucall *uc; + va_list va; + + uc = ucall_alloc(); + uc->cmd = cmd; + + WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp)); + WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file)); + WRITE_ONCE(uc->args[GUEST_LINE], line); + + va_start(va, fmt); + guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); + va_end(va); + + ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + + ucall_free(uc); +} + +void ucall_fmt(uint64_t cmd, const char *fmt, ...) +{ + struct ucall *uc; + va_list va; + + uc = ucall_alloc(); + uc->cmd = cmd; + + va_start(va, fmt); + guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va); + va_end(va); + + ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + + ucall_free(uc); +} + +void ucall(uint64_t cmd, int nargs, ...) 
+{ + struct ucall *uc; + va_list va; + int i; + + uc = ucall_alloc(); + + WRITE_ONCE(uc->cmd, cmd); + + nargs = min(nargs, UCALL_MAX_ARGS); + + va_start(va, nargs); + for (i = 0; i < nargs; ++i) + WRITE_ONCE(uc->args[i], va_arg(va, uint64_t)); + va_end(va); + + ucall_arch_do_ucall((vm_vaddr_t)uc->hva); + + ucall_free(uc); +} + +uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc) +{ + struct ucall ucall; + void *addr; + + if (!uc) + uc = &ucall; + + addr = ucall_arch_get_ucall(vcpu); + if (addr) { + TEST_ASSERT(addr != (void *)GUEST_UCALL_FAILED, + "Guest failed to allocate ucall struct"); + + memcpy(uc, addr, sizeof(*uc)); + vcpu_run_complete_io(vcpu); + } else { + memset(uc, 0, sizeof(*uc)); + } + + return uc->cmd; +} diff --git a/tools/testing/selftests/kvm/lib/userfaultfd_util.c b/tools/testing/selftests/kvm/lib/userfaultfd_util.c new file mode 100644 index 0000000000..271f638915 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/userfaultfd_util.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * KVM userfaultfd util + * Adapted from demand_paging_test.c + * + * Copyright (C) 2018, Red Hat, Inc. + * Copyright (C) 2019-2022 Google LLC + */ + +#define _GNU_SOURCE /* for pipe2 */ + +#include <inttypes.h> +#include <stdio.h> +#include <stdlib.h> +#include <time.h> +#include <poll.h> +#include <pthread.h> +#include <linux/userfaultfd.h> +#include <sys/syscall.h> + +#include "kvm_util.h" +#include "test_util.h" +#include "memstress.h" +#include "userfaultfd_util.h" + +#ifdef __NR_userfaultfd + +static void *uffd_handler_thread_fn(void *arg) +{ + struct uffd_desc *uffd_desc = (struct uffd_desc *)arg; + int uffd = uffd_desc->uffd; + int pipefd = uffd_desc->pipefds[0]; + useconds_t delay = uffd_desc->delay; + int64_t pages = 0; + struct timespec start; + struct timespec ts_diff; + + clock_gettime(CLOCK_MONOTONIC, &start); + while (1) { + struct uffd_msg msg; + struct pollfd pollfd[2]; + char tmp_chr; + int r; + + pollfd[0].fd = uffd; + pollfd[0].events = POLLIN; + pollfd[1].fd = pipefd; + pollfd[1].events = POLLIN; + + r = poll(pollfd, 2, -1); + switch (r) { + case -1: + pr_info("poll err"); + continue; + case 0: + continue; + case 1: + break; + default: + pr_info("Polling uffd returned %d", r); + return NULL; + } + + if (pollfd[0].revents & POLLERR) { + pr_info("uffd revents has POLLERR"); + return NULL; + } + + if (pollfd[1].revents & POLLIN) { + r = read(pollfd[1].fd, &tmp_chr, 1); + TEST_ASSERT(r == 1, + "Error reading pipefd in UFFD thread\n"); + break; + } + + if (!(pollfd[0].revents & POLLIN)) + continue; + + r = read(uffd, &msg, sizeof(msg)); + if (r == -1) { + if (errno == EAGAIN) + continue; + pr_info("Read of uffd got errno %d\n", errno); + return NULL; + } + + if (r != sizeof(msg)) { + pr_info("Read on uffd returned unexpected size: %d bytes", r); + return NULL; + } + + if (!(msg.event & UFFD_EVENT_PAGEFAULT)) + continue; + + if (delay) + usleep(delay); + r = uffd_desc->handler(uffd_desc->uffd_mode, uffd, &msg); + if (r < 0) + return NULL; + pages++; + } + + ts_diff = timespec_elapsed(start); + PER_VCPU_DEBUG("userfaulted %ld pages over %ld.%.9lds. 
(%f/sec)\n", + pages, ts_diff.tv_sec, ts_diff.tv_nsec, + pages / ((double)ts_diff.tv_sec + (double)ts_diff.tv_nsec / NSEC_PER_SEC)); + + return NULL; +} + +struct uffd_desc *uffd_setup_demand_paging(int uffd_mode, useconds_t delay, + void *hva, uint64_t len, + uffd_handler_t handler) +{ + struct uffd_desc *uffd_desc; + bool is_minor = (uffd_mode == UFFDIO_REGISTER_MODE_MINOR); + int uffd; + struct uffdio_api uffdio_api; + struct uffdio_register uffdio_register; + uint64_t expected_ioctls = ((uint64_t) 1) << _UFFDIO_COPY; + int ret; + + PER_PAGE_DEBUG("Userfaultfd %s mode, faults resolved with %s\n", + is_minor ? "MINOR" : "MISSING", + is_minor ? "UFFDIO_CONINUE" : "UFFDIO_COPY"); + + uffd_desc = malloc(sizeof(struct uffd_desc)); + TEST_ASSERT(uffd_desc, "malloc failed"); + + /* In order to get minor faults, prefault via the alias. */ + if (is_minor) + expected_ioctls = ((uint64_t) 1) << _UFFDIO_CONTINUE; + + uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK); + TEST_ASSERT(uffd >= 0, "uffd creation failed, errno: %d", errno); + + uffdio_api.api = UFFD_API; + uffdio_api.features = 0; + TEST_ASSERT(ioctl(uffd, UFFDIO_API, &uffdio_api) != -1, + "ioctl UFFDIO_API failed: %" PRIu64, + (uint64_t)uffdio_api.api); + + uffdio_register.range.start = (uint64_t)hva; + uffdio_register.range.len = len; + uffdio_register.mode = uffd_mode; + TEST_ASSERT(ioctl(uffd, UFFDIO_REGISTER, &uffdio_register) != -1, + "ioctl UFFDIO_REGISTER failed"); + TEST_ASSERT((uffdio_register.ioctls & expected_ioctls) == + expected_ioctls, "missing userfaultfd ioctls"); + + ret = pipe2(uffd_desc->pipefds, O_CLOEXEC | O_NONBLOCK); + TEST_ASSERT(!ret, "Failed to set up pipefd"); + + uffd_desc->uffd_mode = uffd_mode; + uffd_desc->uffd = uffd; + uffd_desc->delay = delay; + uffd_desc->handler = handler; + pthread_create(&uffd_desc->thread, NULL, uffd_handler_thread_fn, + uffd_desc); + + PER_VCPU_DEBUG("Created uffd thread for HVA range [%p, %p)\n", + hva, hva + len); + + return uffd_desc; +} + +void uffd_stop_demand_paging(struct uffd_desc *uffd) +{ + char c = 0; + int ret; + + ret = write(uffd->pipefds[1], &c, 1); + TEST_ASSERT(ret == 1, "Unable to write to pipefd"); + + ret = pthread_join(uffd->thread, NULL); + TEST_ASSERT(ret == 0, "Pthread_join failed."); + + close(uffd->uffd); + + close(uffd->pipefds[1]); + close(uffd->pipefds[0]); + + free(uffd); +} + +#endif /* __NR_userfaultfd */ diff --git a/tools/testing/selftests/kvm/lib/x86_64/apic.c b/tools/testing/selftests/kvm/lib/x86_64/apic.c new file mode 100644 index 0000000000..89153a333e --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/apic.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021, Google LLC. + */ + +#include "apic.h" + +void apic_disable(void) +{ + wrmsr(MSR_IA32_APICBASE, + rdmsr(MSR_IA32_APICBASE) & + ~(MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD)); +} + +void xapic_enable(void) +{ + uint64_t val = rdmsr(MSR_IA32_APICBASE); + + /* Per SDM: to enable xAPIC when in x2APIC must first disable APIC */ + if (val & MSR_IA32_APICBASE_EXTD) { + apic_disable(); + wrmsr(MSR_IA32_APICBASE, + rdmsr(MSR_IA32_APICBASE) | MSR_IA32_APICBASE_ENABLE); + } else if (!(val & MSR_IA32_APICBASE_ENABLE)) { + wrmsr(MSR_IA32_APICBASE, val | MSR_IA32_APICBASE_ENABLE); + } + + /* + * Per SDM: reset value of spurious interrupt vector register has the + * APIC software enabled bit=0. It must be enabled in addition to the + * enable bit in the MSR. 
+ */ + val = xapic_read_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED; + xapic_write_reg(APIC_SPIV, val); +} + +void x2apic_enable(void) +{ + wrmsr(MSR_IA32_APICBASE, rdmsr(MSR_IA32_APICBASE) | + MSR_IA32_APICBASE_ENABLE | MSR_IA32_APICBASE_EXTD); + x2apic_write_reg(APIC_SPIV, + x2apic_read_reg(APIC_SPIV) | APIC_SPIV_APIC_ENABLED); +} diff --git a/tools/testing/selftests/kvm/lib/x86_64/handlers.S b/tools/testing/selftests/kvm/lib/x86_64/handlers.S new file mode 100644 index 0000000000..7629819734 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/handlers.S @@ -0,0 +1,81 @@ +handle_exception: + push %r15 + push %r14 + push %r13 + push %r12 + push %r11 + push %r10 + push %r9 + push %r8 + + push %rdi + push %rsi + push %rbp + push %rbx + push %rdx + push %rcx + push %rax + mov %rsp, %rdi + + call route_exception + + pop %rax + pop %rcx + pop %rdx + pop %rbx + pop %rbp + pop %rsi + pop %rdi + pop %r8 + pop %r9 + pop %r10 + pop %r11 + pop %r12 + pop %r13 + pop %r14 + pop %r15 + + /* Discard vector and error code. */ + add $16, %rsp + iretq + +/* + * Build the handle_exception wrappers which push the vector/error code on the + * stack and an array of pointers to those wrappers. + */ +.pushsection .rodata +.globl idt_handlers +idt_handlers: +.popsection + +.macro HANDLERS has_error from to + vector = \from + .rept \to - \from + 1 + .align 8 + + /* Fetch current address and append it to idt_handlers. */ +666 : +.pushsection .rodata + .quad 666b +.popsection + + .if ! \has_error + pushq $0 + .endif + pushq $vector + jmp handle_exception + vector = vector + 1 + .endr +.endm + +.global idt_handler_code +idt_handler_code: + HANDLERS has_error=0 from=0 to=7 + HANDLERS has_error=1 from=8 to=8 + HANDLERS has_error=0 from=9 to=9 + HANDLERS has_error=1 from=10 to=14 + HANDLERS has_error=0 from=15 to=16 + HANDLERS has_error=1 from=17 to=17 + HANDLERS has_error=0 from=18 to=255 + +.section .note.GNU-stack, "", %progbits diff --git a/tools/testing/selftests/kvm/lib/x86_64/hyperv.c b/tools/testing/selftests/kvm/lib/x86_64/hyperv.c new file mode 100644 index 0000000000..efb7e7a135 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/hyperv.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hyper-V specific functions. + * + * Copyright (C) 2021, Red Hat Inc. + */ +#include <stdint.h> +#include "processor.h" +#include "hyperv.h" + +struct hyperv_test_pages *vcpu_alloc_hyperv_test_pages(struct kvm_vm *vm, + vm_vaddr_t *p_hv_pages_gva) +{ + vm_vaddr_t hv_pages_gva = vm_vaddr_alloc_page(vm); + struct hyperv_test_pages *hv = addr_gva2hva(vm, hv_pages_gva); + + /* Setup of a region of guest memory for the VP Assist page. */ + hv->vp_assist = (void *)vm_vaddr_alloc_page(vm); + hv->vp_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->vp_assist); + hv->vp_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->vp_assist); + + /* Setup of a region of guest memory for the partition assist page. */ + hv->partition_assist = (void *)vm_vaddr_alloc_page(vm); + hv->partition_assist_hva = addr_gva2hva(vm, (uintptr_t)hv->partition_assist); + hv->partition_assist_gpa = addr_gva2gpa(vm, (uintptr_t)hv->partition_assist); + + /* Setup of a region of guest memory for the enlightened VMCS. 
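 * The enlightened VMCS is only usable after the guest installs the VP
 * assist page, e.g. with the helper defined later in this file:
 *
 *	GUEST_ASSERT(!enable_vp_assist(hv->vp_assist_gpa, hv->vp_assist));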
*/ + hv->enlightened_vmcs = (void *)vm_vaddr_alloc_page(vm); + hv->enlightened_vmcs_hva = addr_gva2hva(vm, (uintptr_t)hv->enlightened_vmcs); + hv->enlightened_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)hv->enlightened_vmcs); + + *p_hv_pages_gva = hv_pages_gva; + return hv; +} + +int enable_vp_assist(uint64_t vp_assist_pa, void *vp_assist) +{ + uint64_t val = (vp_assist_pa & HV_X64_MSR_VP_ASSIST_PAGE_ADDRESS_MASK) | + HV_X64_MSR_VP_ASSIST_PAGE_ENABLE; + + wrmsr(HV_X64_MSR_VP_ASSIST_PAGE, val); + + current_vp_assist = vp_assist; + + return 0; +} diff --git a/tools/testing/selftests/kvm/lib/x86_64/memstress.c b/tools/testing/selftests/kvm/lib/x86_64/memstress.c new file mode 100644 index 0000000000..d61e623afc --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/memstress.c @@ -0,0 +1,112 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * x86_64-specific extensions to memstress.c. + * + * Copyright (C) 2022, Google, Inc. + */ +#include <stdio.h> +#include <stdlib.h> +#include <linux/bitmap.h> +#include <linux/bitops.h> + +#include "test_util.h" +#include "kvm_util.h" +#include "memstress.h" +#include "processor.h" +#include "vmx.h" + +void memstress_l2_guest_code(uint64_t vcpu_id) +{ + memstress_guest_code(vcpu_id); + vmcall(); +} + +extern char memstress_l2_guest_entry[]; +__asm__( +"memstress_l2_guest_entry:" +" mov (%rsp), %rdi;" +" call memstress_l2_guest_code;" +" ud2;" +); + +static void memstress_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id) +{ +#define L2_GUEST_STACK_SIZE 64 + unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; + unsigned long *rsp; + + GUEST_ASSERT(vmx->vmcs_gpa); + GUEST_ASSERT(prepare_for_vmx_operation(vmx)); + GUEST_ASSERT(load_vmcs(vmx)); + GUEST_ASSERT(ept_1g_pages_supported()); + + rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1]; + *rsp = vcpu_id; + prepare_vmcs(vmx, memstress_l2_guest_entry, rsp); + + GUEST_ASSERT(!vmlaunch()); + GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL); + GUEST_DONE(); +} + +uint64_t memstress_nested_pages(int nr_vcpus) +{ + /* + * 513 page tables is enough to identity-map 256 TiB of L2 with 1G + * pages and 4-level paging, plus a few pages per-vCPU for data + * structures such as the VMCS. + */ + return 513 + 10 * nr_vcpus; +} + +void memstress_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm) +{ + uint64_t start, end; + + prepare_eptp(vmx, vm, 0); + + /* + * Identity map the first 4G and the test region with 1G pages so that + * KVM can shadow the EPT12 with the maximum huge page size supported + * by the backing source. + */ + nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL); + + start = align_down(memstress_args.gpa, PG_SIZE_1G); + end = align_up(memstress_args.gpa + memstress_args.size, PG_SIZE_1G); + nested_identity_map_1g(vmx, vm, start, end - start); +} + +void memstress_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[]) +{ + struct vmx_pages *vmx, *vmx0 = NULL; + struct kvm_regs regs; + vm_vaddr_t vmx_gva; + int vcpu_id; + + TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX)); + TEST_REQUIRE(kvm_cpu_has_ept()); + + for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) { + vmx = vcpu_alloc_vmx(vm, &vmx_gva); + + if (vcpu_id == 0) { + memstress_setup_ept(vmx, vm); + vmx0 = vmx; + } else { + /* Share the same EPT table across all vCPUs. */ + vmx->eptp = vmx0->eptp; + vmx->eptp_hva = vmx0->eptp_hva; + vmx->eptp_gpa = vmx0->eptp_gpa; + } + + /* + * Override the vCPU to run memstress_l1_guest_code() which will + * bounce it into L2 before calling memstress_guest_code(). 
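 * Rough calling sketch for a memstress-based test that has already created
 * the VM and its vCPUs (the argument plumbing is test-specific):
 *
 *	memstress_setup_nested(vm, nr_vcpus, vcpus);
 *
 * after which each vCPU runs the memstress workload from L2 rather than L1.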
+ */ + vcpu_regs_get(vcpus[vcpu_id], ®s); + regs.rip = (unsigned long) memstress_l1_guest_code; + vcpu_regs_set(vcpus[vcpu_id], ®s); + vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id); + } +} diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c new file mode 100644 index 0000000000..d828837407 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -0,0 +1,1301 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tools/testing/selftests/kvm/lib/x86_64/processor.c + * + * Copyright (C) 2018, Google LLC. + */ + +#include "linux/bitmap.h" +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" + +#ifndef NUM_INTERRUPTS +#define NUM_INTERRUPTS 256 +#endif + +#define DEFAULT_CODE_SELECTOR 0x8 +#define DEFAULT_DATA_SELECTOR 0x10 + +#define MAX_NR_CPUID_ENTRIES 100 + +vm_vaddr_t exception_handlers; +bool host_cpu_is_amd; +bool host_cpu_is_intel; + +static void regs_dump(FILE *stream, struct kvm_regs *regs, uint8_t indent) +{ + fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx " + "rcx: 0x%.16llx rdx: 0x%.16llx\n", + indent, "", + regs->rax, regs->rbx, regs->rcx, regs->rdx); + fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx " + "rsp: 0x%.16llx rbp: 0x%.16llx\n", + indent, "", + regs->rsi, regs->rdi, regs->rsp, regs->rbp); + fprintf(stream, "%*sr8: 0x%.16llx r9: 0x%.16llx " + "r10: 0x%.16llx r11: 0x%.16llx\n", + indent, "", + regs->r8, regs->r9, regs->r10, regs->r11); + fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx " + "r14: 0x%.16llx r15: 0x%.16llx\n", + indent, "", + regs->r12, regs->r13, regs->r14, regs->r15); + fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n", + indent, "", + regs->rip, regs->rflags); +} + +static void segment_dump(FILE *stream, struct kvm_segment *segment, + uint8_t indent) +{ + fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x " + "selector: 0x%.4x type: 0x%.2x\n", + indent, "", segment->base, segment->limit, + segment->selector, segment->type); + fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x " + "db: 0x%.2x s: 0x%.2x l: 0x%.2x\n", + indent, "", segment->present, segment->dpl, + segment->db, segment->s, segment->l); + fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x " + "unusable: 0x%.2x padding: 0x%.2x\n", + indent, "", segment->g, segment->avl, + segment->unusable, segment->padding); +} + +static void dtable_dump(FILE *stream, struct kvm_dtable *dtable, + uint8_t indent) +{ + fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x " + "padding: 0x%.4x 0x%.4x 0x%.4x\n", + indent, "", dtable->base, dtable->limit, + dtable->padding[0], dtable->padding[1], dtable->padding[2]); +} + +static void sregs_dump(FILE *stream, struct kvm_sregs *sregs, uint8_t indent) +{ + unsigned int i; + + fprintf(stream, "%*scs:\n", indent, ""); + segment_dump(stream, &sregs->cs, indent + 2); + fprintf(stream, "%*sds:\n", indent, ""); + segment_dump(stream, &sregs->ds, indent + 2); + fprintf(stream, "%*ses:\n", indent, ""); + segment_dump(stream, &sregs->es, indent + 2); + fprintf(stream, "%*sfs:\n", indent, ""); + segment_dump(stream, &sregs->fs, indent + 2); + fprintf(stream, "%*sgs:\n", indent, ""); + segment_dump(stream, &sregs->gs, indent + 2); + fprintf(stream, "%*sss:\n", indent, ""); + segment_dump(stream, &sregs->ss, indent + 2); + fprintf(stream, "%*str:\n", indent, ""); + segment_dump(stream, &sregs->tr, indent + 2); + fprintf(stream, "%*sldt:\n", indent, ""); + segment_dump(stream, &sregs->ldt, indent + 2); + + fprintf(stream, "%*sgdt:\n", indent, ""); + dtable_dump(stream, &sregs->gdt, 
indent + 2); + fprintf(stream, "%*sidt:\n", indent, ""); + dtable_dump(stream, &sregs->idt, indent + 2); + + fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx " + "cr3: 0x%.16llx cr4: 0x%.16llx\n", + indent, "", + sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4); + fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx " + "apic_base: 0x%.16llx\n", + indent, "", + sregs->cr8, sregs->efer, sregs->apic_base); + + fprintf(stream, "%*sinterrupt_bitmap:\n", indent, ""); + for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) { + fprintf(stream, "%*s%.16llx\n", indent + 2, "", + sregs->interrupt_bitmap[i]); + } +} + +bool kvm_is_tdp_enabled(void) +{ + if (host_cpu_is_intel) + return get_kvm_intel_param_bool("ept"); + else + return get_kvm_amd_param_bool("npt"); +} + +void virt_arch_pgd_alloc(struct kvm_vm *vm) +{ + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " + "unknown or unsupported guest mode, mode: 0x%x", vm->mode); + + /* If needed, create page map l4 table. */ + if (!vm->pgd_created) { + vm->pgd = vm_alloc_page_table(vm); + vm->pgd_created = true; + } +} + +static void *virt_get_pte(struct kvm_vm *vm, uint64_t *parent_pte, + uint64_t vaddr, int level) +{ + uint64_t pt_gpa = PTE_GET_PA(*parent_pte); + uint64_t *page_table = addr_gpa2hva(vm, pt_gpa); + int index = (vaddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu; + + TEST_ASSERT((*parent_pte & PTE_PRESENT_MASK) || parent_pte == &vm->pgd, + "Parent PTE (level %d) not PRESENT for gva: 0x%08lx", + level + 1, vaddr); + + return &page_table[index]; +} + +static uint64_t *virt_create_upper_pte(struct kvm_vm *vm, + uint64_t *parent_pte, + uint64_t vaddr, + uint64_t paddr, + int current_level, + int target_level) +{ + uint64_t *pte = virt_get_pte(vm, parent_pte, vaddr, current_level); + + if (!(*pte & PTE_PRESENT_MASK)) { + *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK; + if (current_level == target_level) + *pte |= PTE_LARGE_MASK | (paddr & PHYSICAL_PAGE_MASK); + else + *pte |= vm_alloc_page_table(vm) & PHYSICAL_PAGE_MASK; + } else { + /* + * Entry already present. Assert that the caller doesn't want + * a hugepage at this level, and that there isn't a hugepage at + * this level. + */ + TEST_ASSERT(current_level != target_level, + "Cannot create hugepage at level: %u, vaddr: 0x%lx\n", + current_level, vaddr); + TEST_ASSERT(!(*pte & PTE_LARGE_MASK), + "Cannot create page table at level: %u, vaddr: 0x%lx\n", + current_level, vaddr); + } + return pte; +} + +void __virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, int level) +{ + const uint64_t pg_size = PG_LEVEL_SIZE(level); + uint64_t *pml4e, *pdpe, *pde; + uint64_t *pte; + + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, + "Unknown or unsupported guest mode, mode: 0x%x", vm->mode); + + TEST_ASSERT((vaddr % pg_size) == 0, + "Virtual address not aligned,\n" + "vaddr: 0x%lx page size: 0x%lx", vaddr, pg_size); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, (vaddr >> vm->page_shift)), + "Invalid virtual address, vaddr: 0x%lx", vaddr); + TEST_ASSERT((paddr % pg_size) == 0, + "Physical address not aligned,\n" + " paddr: 0x%lx page size: 0x%lx", paddr, pg_size); + TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond maximum supported,\n" + " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + paddr, vm->max_gfn, vm->page_size); + + /* + * Allocate upper level page tables, if not already present. Return + * early if a hugepage was created. 
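 * For example, with the PG_LEVEL_* constants used below, a call such as
 *
 *	__virt_pg_map(vm, gva, gpa, PG_LEVEL_2M);
 *
 * walks/creates the PML4E (PG_LEVEL_512G) and PDPTE (PG_LEVEL_1G) entries,
 * installs a PDE with PTE_LARGE_MASK set, and returns before the 4K PTE
 * step at the bottom of this function.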
+ */ + pml4e = virt_create_upper_pte(vm, &vm->pgd, vaddr, paddr, PG_LEVEL_512G, level); + if (*pml4e & PTE_LARGE_MASK) + return; + + pdpe = virt_create_upper_pte(vm, pml4e, vaddr, paddr, PG_LEVEL_1G, level); + if (*pdpe & PTE_LARGE_MASK) + return; + + pde = virt_create_upper_pte(vm, pdpe, vaddr, paddr, PG_LEVEL_2M, level); + if (*pde & PTE_LARGE_MASK) + return; + + /* Fill in page table entry. */ + pte = virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K); + TEST_ASSERT(!(*pte & PTE_PRESENT_MASK), + "PTE already present for 4k page at vaddr: 0x%lx\n", vaddr); + *pte = PTE_PRESENT_MASK | PTE_WRITABLE_MASK | (paddr & PHYSICAL_PAGE_MASK); +} + +void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) +{ + __virt_pg_map(vm, vaddr, paddr, PG_LEVEL_4K); +} + +void virt_map_level(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, + uint64_t nr_bytes, int level) +{ + uint64_t pg_size = PG_LEVEL_SIZE(level); + uint64_t nr_pages = nr_bytes / pg_size; + int i; + + TEST_ASSERT(nr_bytes % pg_size == 0, + "Region size not aligned: nr_bytes: 0x%lx, page size: 0x%lx", + nr_bytes, pg_size); + + for (i = 0; i < nr_pages; i++) { + __virt_pg_map(vm, vaddr, paddr, level); + + vaddr += pg_size; + paddr += pg_size; + } +} + +static bool vm_is_target_pte(uint64_t *pte, int *level, int current_level) +{ + if (*pte & PTE_LARGE_MASK) { + TEST_ASSERT(*level == PG_LEVEL_NONE || + *level == current_level, + "Unexpected hugepage at level %d\n", current_level); + *level = current_level; + } + + return *level == current_level; +} + +uint64_t *__vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr, + int *level) +{ + uint64_t *pml4e, *pdpe, *pde; + + TEST_ASSERT(*level >= PG_LEVEL_NONE && *level < PG_LEVEL_NUM, + "Invalid PG_LEVEL_* '%d'", *level); + + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " + "unknown or unsupported guest mode, mode: 0x%x", vm->mode); + TEST_ASSERT(sparsebit_is_set(vm->vpages_valid, + (vaddr >> vm->page_shift)), + "Invalid virtual address, vaddr: 0x%lx", + vaddr); + /* + * Based on the mode check above there are 48 bits in the vaddr, so + * shift 16 to sign extend the last bit (bit-47), + */ + TEST_ASSERT(vaddr == (((int64_t)vaddr << 16) >> 16), + "Canonical check failed. 
The virtual address is invalid."); + + pml4e = virt_get_pte(vm, &vm->pgd, vaddr, PG_LEVEL_512G); + if (vm_is_target_pte(pml4e, level, PG_LEVEL_512G)) + return pml4e; + + pdpe = virt_get_pte(vm, pml4e, vaddr, PG_LEVEL_1G); + if (vm_is_target_pte(pdpe, level, PG_LEVEL_1G)) + return pdpe; + + pde = virt_get_pte(vm, pdpe, vaddr, PG_LEVEL_2M); + if (vm_is_target_pte(pde, level, PG_LEVEL_2M)) + return pde; + + return virt_get_pte(vm, pde, vaddr, PG_LEVEL_4K); +} + +uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr) +{ + int level = PG_LEVEL_4K; + + return __vm_get_page_table_entry(vm, vaddr, &level); +} + +void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) +{ + uint64_t *pml4e, *pml4e_start; + uint64_t *pdpe, *pdpe_start; + uint64_t *pde, *pde_start; + uint64_t *pte, *pte_start; + + if (!vm->pgd_created) + return; + + fprintf(stream, "%*s " + " no\n", indent, ""); + fprintf(stream, "%*s index hvaddr gpaddr " + "addr w exec dirty\n", + indent, ""); + pml4e_start = (uint64_t *) addr_gpa2hva(vm, vm->pgd); + for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) { + pml4e = &pml4e_start[n1]; + if (!(*pml4e & PTE_PRESENT_MASK)) + continue; + fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10llx %u " + " %u\n", + indent, "", + pml4e - pml4e_start, pml4e, + addr_hva2gpa(vm, pml4e), PTE_GET_PFN(*pml4e), + !!(*pml4e & PTE_WRITABLE_MASK), !!(*pml4e & PTE_NX_MASK)); + + pdpe_start = addr_gpa2hva(vm, *pml4e & PHYSICAL_PAGE_MASK); + for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) { + pdpe = &pdpe_start[n2]; + if (!(*pdpe & PTE_PRESENT_MASK)) + continue; + fprintf(stream, "%*spdpe 0x%-3zx %p 0x%-12lx 0x%-10llx " + "%u %u\n", + indent, "", + pdpe - pdpe_start, pdpe, + addr_hva2gpa(vm, pdpe), + PTE_GET_PFN(*pdpe), !!(*pdpe & PTE_WRITABLE_MASK), + !!(*pdpe & PTE_NX_MASK)); + + pde_start = addr_gpa2hva(vm, *pdpe & PHYSICAL_PAGE_MASK); + for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) { + pde = &pde_start[n3]; + if (!(*pde & PTE_PRESENT_MASK)) + continue; + fprintf(stream, "%*spde 0x%-3zx %p " + "0x%-12lx 0x%-10llx %u %u\n", + indent, "", pde - pde_start, pde, + addr_hva2gpa(vm, pde), + PTE_GET_PFN(*pde), !!(*pde & PTE_WRITABLE_MASK), + !!(*pde & PTE_NX_MASK)); + + pte_start = addr_gpa2hva(vm, *pde & PHYSICAL_PAGE_MASK); + for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) { + pte = &pte_start[n4]; + if (!(*pte & PTE_PRESENT_MASK)) + continue; + fprintf(stream, "%*spte 0x%-3zx %p " + "0x%-12lx 0x%-10llx %u %u " + " %u 0x%-10lx\n", + indent, "", + pte - pte_start, pte, + addr_hva2gpa(vm, pte), + PTE_GET_PFN(*pte), + !!(*pte & PTE_WRITABLE_MASK), + !!(*pte & PTE_NX_MASK), + !!(*pte & PTE_DIRTY_MASK), + ((uint64_t) n1 << 27) + | ((uint64_t) n2 << 18) + | ((uint64_t) n3 << 9) + | ((uint64_t) n4)); + } + } + } + } +} + +/* + * Set Unusable Segment + * + * Input Args: None + * + * Output Args: + * segp - Pointer to segment register + * + * Return: None + * + * Sets the segment register pointed to by @segp to an unusable state. 
+ */ +static void kvm_seg_set_unusable(struct kvm_segment *segp) +{ + memset(segp, 0, sizeof(*segp)); + segp->unusable = true; +} + +static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp) +{ + void *gdt = addr_gva2hva(vm, vm->gdt); + struct desc64 *desc = gdt + (segp->selector >> 3) * 8; + + desc->limit0 = segp->limit & 0xFFFF; + desc->base0 = segp->base & 0xFFFF; + desc->base1 = segp->base >> 16; + desc->type = segp->type; + desc->s = segp->s; + desc->dpl = segp->dpl; + desc->p = segp->present; + desc->limit1 = segp->limit >> 16; + desc->avl = segp->avl; + desc->l = segp->l; + desc->db = segp->db; + desc->g = segp->g; + desc->base2 = segp->base >> 24; + if (!segp->s) + desc->base3 = segp->base >> 32; +} + + +/* + * Set Long Mode Flat Kernel Code Segment + * + * Input Args: + * vm - VM whose GDT is being filled, or NULL to only write segp + * selector - selector value + * + * Output Args: + * segp - Pointer to KVM segment + * + * Return: None + * + * Sets up the KVM segment pointed to by @segp, to be a code segment + * with the selector value given by @selector. + */ +static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector, + struct kvm_segment *segp) +{ + memset(segp, 0, sizeof(*segp)); + segp->selector = selector; + segp->limit = 0xFFFFFFFFu; + segp->s = 0x1; /* kTypeCodeData */ + segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed + * | kFlagCodeReadable + */ + segp->g = true; + segp->l = true; + segp->present = 1; + if (vm) + kvm_seg_fill_gdt_64bit(vm, segp); +} + +/* + * Set Long Mode Flat Kernel Data Segment + * + * Input Args: + * vm - VM whose GDT is being filled, or NULL to only write segp + * selector - selector value + * + * Output Args: + * segp - Pointer to KVM segment + * + * Return: None + * + * Sets up the KVM segment pointed to by @segp, to be a data segment + * with the selector value given by @selector. + */ +static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector, + struct kvm_segment *segp) +{ + memset(segp, 0, sizeof(*segp)); + segp->selector = selector; + segp->limit = 0xFFFFFFFFu; + segp->s = 0x1; /* kTypeCodeData */ + segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed + * | kFlagDataWritable + */ + segp->g = true; + segp->present = true; + if (vm) + kvm_seg_fill_gdt_64bit(vm, segp); +} + +vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) +{ + int level = PG_LEVEL_NONE; + uint64_t *pte = __vm_get_page_table_entry(vm, gva, &level); + + TEST_ASSERT(*pte & PTE_PRESENT_MASK, + "Leaf PTE not PRESENT for gva: 0x%08lx", gva); + + /* + * No need for a hugepage mask on the PTE, x86-64 requires the "unused" + * address bits to be zero. + */ + return PTE_GET_PA(*pte) | (gva & ~HUGEPAGE_MASK(level)); +} + +static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt) +{ + if (!vm->gdt) + vm->gdt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); + + dt->base = vm->gdt; + dt->limit = getpagesize(); +} + +static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp, + int selector) +{ + if (!vm->tss) + vm->tss = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); + + memset(segp, 0, sizeof(*segp)); + segp->base = vm->tss; + segp->limit = 0x67; + segp->selector = selector; + segp->type = 0xb; + segp->present = 1; + kvm_seg_fill_gdt_64bit(vm, segp); +} + +static void vcpu_setup(struct kvm_vm *vm, struct kvm_vcpu *vcpu) +{ + struct kvm_sregs sregs; + + /* Set mode specific system register values. 
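 * The net result is a flat 64-bit guest (4-level paging rooted at vm->pgd,
 * CS/DS selectors 0x8/0x10), which is what allows tests to hand a plain C
 * function to the harness, e.g.
 *
 *	vm = vm_create_with_one_vcpu(&vcpu, guest_code);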
*/ + vcpu_sregs_get(vcpu, &sregs); + + sregs.idt.limit = 0; + + kvm_setup_gdt(vm, &sregs.gdt); + + switch (vm->mode) { + case VM_MODE_PXXV48_4K: + sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG; + sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR; + sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX); + + kvm_seg_set_unusable(&sregs.ldt); + kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs); + kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds); + kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es); + kvm_setup_tss_64bit(vm, &sregs.tr, 0x18); + break; + + default: + TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode); + } + + sregs.cr3 = vm->pgd; + vcpu_sregs_set(vcpu, &sregs); +} + +void kvm_arch_vm_post_create(struct kvm_vm *vm) +{ + vm_create_irqchip(vm); + sync_global_to_guest(vm, host_cpu_is_intel); + sync_global_to_guest(vm, host_cpu_is_amd); +} + +struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, + void *guest_code) +{ + struct kvm_mp_state mp_state; + struct kvm_regs regs; + vm_vaddr_t stack_vaddr; + struct kvm_vcpu *vcpu; + + stack_vaddr = __vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(), + DEFAULT_GUEST_STACK_VADDR_MIN, + MEM_REGION_DATA); + + stack_vaddr += DEFAULT_STACK_PGS * getpagesize(); + + /* + * Align stack to match calling sequence requirements in section "The + * Stack Frame" of the System V ABI AMD64 Architecture Processor + * Supplement, which requires the value (%rsp + 8) to be a multiple of + * 16 when control is transferred to the function entry point. + * + * If this code is ever used to launch a vCPU with 32-bit entry point it + * may need to subtract 4 bytes instead of 8 bytes. + */ + TEST_ASSERT(IS_ALIGNED(stack_vaddr, PAGE_SIZE), + "__vm_vaddr_alloc() did not provide a page-aligned address"); + stack_vaddr -= 8; + + vcpu = __vm_vcpu_add(vm, vcpu_id); + vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid()); + vcpu_setup(vm, vcpu); + + /* Setup guest general purpose registers */ + vcpu_regs_get(vcpu, ®s); + regs.rflags = regs.rflags | 0x2; + regs.rsp = stack_vaddr; + regs.rip = (unsigned long) guest_code; + vcpu_regs_set(vcpu, ®s); + + /* Setup the MP state */ + mp_state.mp_state = 0; + vcpu_mp_state_set(vcpu, &mp_state); + + return vcpu; +} + +struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id) +{ + struct kvm_vcpu *vcpu = __vm_vcpu_add(vm, vcpu_id); + + vcpu_init_cpuid(vcpu, kvm_get_supported_cpuid()); + + return vcpu; +} + +void vcpu_arch_free(struct kvm_vcpu *vcpu) +{ + if (vcpu->cpuid) + free(vcpu->cpuid); +} + +/* Do not use kvm_supported_cpuid directly except for validity checks. */ +static void *kvm_supported_cpuid; + +const struct kvm_cpuid2 *kvm_get_supported_cpuid(void) +{ + int kvm_fd; + + if (kvm_supported_cpuid) + return kvm_supported_cpuid; + + kvm_supported_cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES); + kvm_fd = open_kvm_dev_path_or_exit(); + + kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, + (struct kvm_cpuid2 *)kvm_supported_cpuid); + + close(kvm_fd); + return kvm_supported_cpuid; +} + +static uint32_t __kvm_cpu_has(const struct kvm_cpuid2 *cpuid, + uint32_t function, uint32_t index, + uint8_t reg, uint8_t lo, uint8_t hi) +{ + const struct kvm_cpuid_entry2 *entry; + int i; + + for (i = 0; i < cpuid->nent; i++) { + entry = &cpuid->entries[i]; + + /* + * The output registers in kvm_cpuid_entry2 are in alphabetical + * order, but kvm_x86_cpu_feature matches that mess, so yay + * pointer shenanigans! 
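 * Worked example: for a feature in CPUID.01H:ECX (say bit 26, XSAVE),
 * feature.reg is KVM_CPUID_ECX == 2, so (&entry->eax)[2] lands on
 * entry->ecx and GENMASK(26, 26) isolates the bit before the shift by lo.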
+ */ + if (entry->function == function && entry->index == index) + return ((&entry->eax)[reg] & GENMASK(hi, lo)) >> lo; + } + + return 0; +} + +bool kvm_cpuid_has(const struct kvm_cpuid2 *cpuid, + struct kvm_x86_cpu_feature feature) +{ + return __kvm_cpu_has(cpuid, feature.function, feature.index, + feature.reg, feature.bit, feature.bit); +} + +uint32_t kvm_cpuid_property(const struct kvm_cpuid2 *cpuid, + struct kvm_x86_cpu_property property) +{ + return __kvm_cpu_has(cpuid, property.function, property.index, + property.reg, property.lo_bit, property.hi_bit); +} + +uint64_t kvm_get_feature_msr(uint64_t msr_index) +{ + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + int r, kvm_fd; + + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + kvm_fd = open_kvm_dev_path_or_exit(); + + r = __kvm_ioctl(kvm_fd, KVM_GET_MSRS, &buffer.header); + TEST_ASSERT(r == 1, KVM_IOCTL_ERROR(KVM_GET_MSRS, r)); + + close(kvm_fd); + return buffer.entry.data; +} + +void __vm_xsave_require_permission(uint64_t xfeature, const char *name) +{ + int kvm_fd; + u64 bitmask; + long rc; + struct kvm_device_attr attr = { + .group = 0, + .attr = KVM_X86_XCOMP_GUEST_SUPP, + .addr = (unsigned long) &bitmask, + }; + + TEST_ASSERT(!kvm_supported_cpuid, + "kvm_get_supported_cpuid() cannot be used before ARCH_REQ_XCOMP_GUEST_PERM"); + + TEST_ASSERT(is_power_of_2(xfeature), + "Dynamic XFeatures must be enabled one at a time"); + + kvm_fd = open_kvm_dev_path_or_exit(); + rc = __kvm_ioctl(kvm_fd, KVM_GET_DEVICE_ATTR, &attr); + close(kvm_fd); + + if (rc == -1 && (errno == ENXIO || errno == EINVAL)) + __TEST_REQUIRE(0, "KVM_X86_XCOMP_GUEST_SUPP not supported"); + + TEST_ASSERT(rc == 0, "KVM_GET_DEVICE_ATTR(0, KVM_X86_XCOMP_GUEST_SUPP) error: %ld", rc); + + __TEST_REQUIRE(bitmask & xfeature, + "Required XSAVE feature '%s' not supported", name); + + TEST_REQUIRE(!syscall(SYS_arch_prctl, ARCH_REQ_XCOMP_GUEST_PERM, ilog2(xfeature))); + + rc = syscall(SYS_arch_prctl, ARCH_GET_XCOMP_GUEST_PERM, &bitmask); + TEST_ASSERT(rc == 0, "prctl(ARCH_GET_XCOMP_GUEST_PERM) error: %ld", rc); + TEST_ASSERT(bitmask & xfeature, + "'%s' (0x%lx) not permitted after prctl(ARCH_REQ_XCOMP_GUEST_PERM) permitted=0x%lx", + name, xfeature, bitmask); +} + +void vcpu_init_cpuid(struct kvm_vcpu *vcpu, const struct kvm_cpuid2 *cpuid) +{ + TEST_ASSERT(cpuid != vcpu->cpuid, "@cpuid can't be the vCPU's CPUID"); + + /* Allow overriding the default CPUID. 
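 * Typical override pattern once a vCPU exists: edit the cached copy and
 * push it back via vcpu_set_cpuid(), e.g. using the helper just below:
 *
 *	vcpu_set_cpuid_maxphyaddr(vcpu, 48);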
*/ + if (vcpu->cpuid && vcpu->cpuid->nent < cpuid->nent) { + free(vcpu->cpuid); + vcpu->cpuid = NULL; + } + + if (!vcpu->cpuid) + vcpu->cpuid = allocate_kvm_cpuid2(cpuid->nent); + + memcpy(vcpu->cpuid, cpuid, kvm_cpuid2_size(cpuid->nent)); + vcpu_set_cpuid(vcpu); +} + +void vcpu_set_cpuid_maxphyaddr(struct kvm_vcpu *vcpu, uint8_t maxphyaddr) +{ + struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, 0x80000008); + + entry->eax = (entry->eax & ~0xff) | maxphyaddr; + vcpu_set_cpuid(vcpu); +} + +void vcpu_clear_cpuid_entry(struct kvm_vcpu *vcpu, uint32_t function) +{ + struct kvm_cpuid_entry2 *entry = vcpu_get_cpuid_entry(vcpu, function); + + entry->eax = 0; + entry->ebx = 0; + entry->ecx = 0; + entry->edx = 0; + vcpu_set_cpuid(vcpu); +} + +void vcpu_set_or_clear_cpuid_feature(struct kvm_vcpu *vcpu, + struct kvm_x86_cpu_feature feature, + bool set) +{ + struct kvm_cpuid_entry2 *entry; + u32 *reg; + + entry = __vcpu_get_cpuid_entry(vcpu, feature.function, feature.index); + reg = (&entry->eax) + feature.reg; + + if (set) + *reg |= BIT(feature.bit); + else + *reg &= ~BIT(feature.bit); + + vcpu_set_cpuid(vcpu); +} + +uint64_t vcpu_get_msr(struct kvm_vcpu *vcpu, uint64_t msr_index) +{ + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + + vcpu_msrs_get(vcpu, &buffer.header); + + return buffer.entry.data; +} + +int _vcpu_set_msr(struct kvm_vcpu *vcpu, uint64_t msr_index, uint64_t msr_value) +{ + struct { + struct kvm_msrs header; + struct kvm_msr_entry entry; + } buffer = {}; + + memset(&buffer, 0, sizeof(buffer)); + buffer.header.nmsrs = 1; + buffer.entry.index = msr_index; + buffer.entry.data = msr_value; + + return __vcpu_ioctl(vcpu, KVM_SET_MSRS, &buffer.header); +} + +void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...) 
+{ + va_list ap; + struct kvm_regs regs; + + TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n" + " num: %u\n", + num); + + va_start(ap, num); + vcpu_regs_get(vcpu, ®s); + + if (num >= 1) + regs.rdi = va_arg(ap, uint64_t); + + if (num >= 2) + regs.rsi = va_arg(ap, uint64_t); + + if (num >= 3) + regs.rdx = va_arg(ap, uint64_t); + + if (num >= 4) + regs.rcx = va_arg(ap, uint64_t); + + if (num >= 5) + regs.r8 = va_arg(ap, uint64_t); + + if (num >= 6) + regs.r9 = va_arg(ap, uint64_t); + + vcpu_regs_set(vcpu, ®s); + va_end(ap); +} + +void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent) +{ + struct kvm_regs regs; + struct kvm_sregs sregs; + + fprintf(stream, "%*svCPU ID: %u\n", indent, "", vcpu->id); + + fprintf(stream, "%*sregs:\n", indent + 2, ""); + vcpu_regs_get(vcpu, ®s); + regs_dump(stream, ®s, indent + 4); + + fprintf(stream, "%*ssregs:\n", indent + 2, ""); + vcpu_sregs_get(vcpu, &sregs); + sregs_dump(stream, &sregs, indent + 4); +} + +static struct kvm_msr_list *__kvm_get_msr_index_list(bool feature_msrs) +{ + struct kvm_msr_list *list; + struct kvm_msr_list nmsrs; + int kvm_fd, r; + + kvm_fd = open_kvm_dev_path_or_exit(); + + nmsrs.nmsrs = 0; + if (!feature_msrs) + r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs); + else + r = __kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, &nmsrs); + + TEST_ASSERT(r == -1 && errno == E2BIG, + "Expected -E2BIG, got rc: %i errno: %i (%s)", + r, errno, strerror(errno)); + + list = malloc(sizeof(*list) + nmsrs.nmsrs * sizeof(list->indices[0])); + TEST_ASSERT(list, "-ENOMEM when allocating MSR index list"); + list->nmsrs = nmsrs.nmsrs; + + if (!feature_msrs) + kvm_ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list); + else + kvm_ioctl(kvm_fd, KVM_GET_MSR_FEATURE_INDEX_LIST, list); + close(kvm_fd); + + TEST_ASSERT(list->nmsrs == nmsrs.nmsrs, + "Number of MSRs in list changed, was %d, now %d", + nmsrs.nmsrs, list->nmsrs); + return list; +} + +const struct kvm_msr_list *kvm_get_msr_index_list(void) +{ + static const struct kvm_msr_list *list; + + if (!list) + list = __kvm_get_msr_index_list(false); + return list; +} + + +const struct kvm_msr_list *kvm_get_feature_msr_index_list(void) +{ + static const struct kvm_msr_list *list; + + if (!list) + list = __kvm_get_msr_index_list(true); + return list; +} + +bool kvm_msr_is_in_save_restore_list(uint32_t msr_index) +{ + const struct kvm_msr_list *list = kvm_get_msr_index_list(); + int i; + + for (i = 0; i < list->nmsrs; ++i) { + if (list->indices[i] == msr_index) + return true; + } + + return false; +} + +static void vcpu_save_xsave_state(struct kvm_vcpu *vcpu, + struct kvm_x86_state *state) +{ + int size = vm_check_cap(vcpu->vm, KVM_CAP_XSAVE2); + + if (size) { + state->xsave = malloc(size); + vcpu_xsave2_get(vcpu, state->xsave); + } else { + state->xsave = malloc(sizeof(struct kvm_xsave)); + vcpu_xsave_get(vcpu, state->xsave); + } +} + +struct kvm_x86_state *vcpu_save_state(struct kvm_vcpu *vcpu) +{ + const struct kvm_msr_list *msr_list = kvm_get_msr_index_list(); + struct kvm_x86_state *state; + int i; + + static int nested_size = -1; + + if (nested_size == -1) { + nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE); + TEST_ASSERT(nested_size <= sizeof(state->nested_), + "Nested state size too big, %i > %zi", + nested_size, sizeof(state->nested_)); + } + + /* + * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees + * guest state is consistent only after userspace re-enters the + * kernel with KVM_RUN. Complete IO prior to migrating state + * to a new VM. 
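 * A minimal save/restore flow, assuming the common kvm_util helpers
 * (e.g. vm_recreate_with_one_vcpu()):
 *
 *	state = vcpu_save_state(vcpu);
 *	kvm_vm_release(vm);
 *	vcpu = vm_recreate_with_one_vcpu(vm);
 *	vcpu_load_state(vcpu, state);
 *	kvm_x86_state_cleanup(state);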
+ */ + vcpu_run_complete_io(vcpu); + + state = malloc(sizeof(*state) + msr_list->nmsrs * sizeof(state->msrs.entries[0])); + TEST_ASSERT(state, "-ENOMEM when allocating kvm state"); + + vcpu_events_get(vcpu, &state->events); + vcpu_mp_state_get(vcpu, &state->mp_state); + vcpu_regs_get(vcpu, &state->regs); + vcpu_save_xsave_state(vcpu, state); + + if (kvm_has_cap(KVM_CAP_XCRS)) + vcpu_xcrs_get(vcpu, &state->xcrs); + + vcpu_sregs_get(vcpu, &state->sregs); + + if (nested_size) { + state->nested.size = sizeof(state->nested_); + + vcpu_nested_state_get(vcpu, &state->nested); + TEST_ASSERT(state->nested.size <= nested_size, + "Nested state size too big, %i (KVM_CHECK_CAP gave %i)", + state->nested.size, nested_size); + } else { + state->nested.size = 0; + } + + state->msrs.nmsrs = msr_list->nmsrs; + for (i = 0; i < msr_list->nmsrs; i++) + state->msrs.entries[i].index = msr_list->indices[i]; + vcpu_msrs_get(vcpu, &state->msrs); + + vcpu_debugregs_get(vcpu, &state->debugregs); + + return state; +} + +void vcpu_load_state(struct kvm_vcpu *vcpu, struct kvm_x86_state *state) +{ + vcpu_sregs_set(vcpu, &state->sregs); + vcpu_msrs_set(vcpu, &state->msrs); + + if (kvm_has_cap(KVM_CAP_XCRS)) + vcpu_xcrs_set(vcpu, &state->xcrs); + + vcpu_xsave_set(vcpu, state->xsave); + vcpu_events_set(vcpu, &state->events); + vcpu_mp_state_set(vcpu, &state->mp_state); + vcpu_debugregs_set(vcpu, &state->debugregs); + vcpu_regs_set(vcpu, &state->regs); + + if (state->nested.size) + vcpu_nested_state_set(vcpu, &state->nested); +} + +void kvm_x86_state_cleanup(struct kvm_x86_state *state) +{ + free(state->xsave); + free(state); +} + +void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits) +{ + if (!kvm_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) { + *pa_bits = kvm_cpu_has(X86_FEATURE_PAE) ? 
36 : 32; + *va_bits = 32; + } else { + *pa_bits = kvm_cpu_property(X86_PROPERTY_MAX_PHY_ADDR); + *va_bits = kvm_cpu_property(X86_PROPERTY_MAX_VIRT_ADDR); + } +} + +static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr, + int dpl, unsigned short selector) +{ + struct idt_entry *base = + (struct idt_entry *)addr_gva2hva(vm, vm->idt); + struct idt_entry *e = &base[vector]; + + memset(e, 0, sizeof(*e)); + e->offset0 = addr; + e->selector = selector; + e->ist = 0; + e->type = 14; + e->dpl = dpl; + e->p = 1; + e->offset1 = addr >> 16; + e->offset2 = addr >> 32; +} + + +static bool kvm_fixup_exception(struct ex_regs *regs) +{ + if (regs->r9 != KVM_EXCEPTION_MAGIC || regs->rip != regs->r10) + return false; + + if (regs->vector == DE_VECTOR) + return false; + + regs->rip = regs->r11; + regs->r9 = regs->vector; + regs->r10 = regs->error_code; + return true; +} + +void route_exception(struct ex_regs *regs) +{ + typedef void(*handler)(struct ex_regs *); + handler *handlers = (handler *)exception_handlers; + + if (handlers && handlers[regs->vector]) { + handlers[regs->vector](regs); + return; + } + + if (kvm_fixup_exception(regs)) + return; + + ucall_assert(UCALL_UNHANDLED, + "Unhandled exception in guest", __FILE__, __LINE__, + "Unhandled exception '0x%lx' at guest RIP '0x%lx'", + regs->vector, regs->rip); +} + +void vm_init_descriptor_tables(struct kvm_vm *vm) +{ + extern void *idt_handlers; + int i; + + vm->idt = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); + vm->handlers = __vm_vaddr_alloc_page(vm, MEM_REGION_DATA); + /* Handlers have the same address in both address spaces.*/ + for (i = 0; i < NUM_INTERRUPTS; i++) + set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0, + DEFAULT_CODE_SELECTOR); +} + +void vcpu_init_descriptor_tables(struct kvm_vcpu *vcpu) +{ + struct kvm_vm *vm = vcpu->vm; + struct kvm_sregs sregs; + + vcpu_sregs_get(vcpu, &sregs); + sregs.idt.base = vm->idt; + sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1; + sregs.gdt.base = vm->gdt; + sregs.gdt.limit = getpagesize() - 1; + kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs); + vcpu_sregs_set(vcpu, &sregs); + *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers; +} + +void vm_install_exception_handler(struct kvm_vm *vm, int vector, + void (*handler)(struct ex_regs *)) +{ + vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers); + + handlers[vector] = (vm_vaddr_t)handler; +} + +void assert_on_unhandled_exception(struct kvm_vcpu *vcpu) +{ + struct ucall uc; + + if (get_ucall(vcpu, &uc) == UCALL_UNHANDLED) + REPORT_GUEST_ASSERT(uc); +} + +const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, + uint32_t function, uint32_t index) +{ + int i; + + for (i = 0; i < cpuid->nent; i++) { + if (cpuid->entries[i].function == function && + cpuid->entries[i].index == index) + return &cpuid->entries[i]; + } + + TEST_FAIL("CPUID function 0x%x index 0x%x not found ", function, index); + + return NULL; +} + +#define X86_HYPERCALL(inputs...) 
\ +({ \ + uint64_t r; \ + \ + asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \ + "jnz 1f\n\t" \ + "vmcall\n\t" \ + "jmp 2f\n\t" \ + "1: vmmcall\n\t" \ + "2:" \ + : "=a"(r) \ + : [use_vmmcall] "r" (host_cpu_is_amd), inputs); \ + \ + r; \ +}) + +uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, + uint64_t a3) +{ + return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)); +} + +uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1) +{ + return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1)); +} + +void xen_hypercall(uint64_t nr, uint64_t a0, void *a1) +{ + GUEST_ASSERT(!__xen_hypercall(nr, a0, a1)); +} + +const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void) +{ + static struct kvm_cpuid2 *cpuid; + int kvm_fd; + + if (cpuid) + return cpuid; + + cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES); + kvm_fd = open_kvm_dev_path_or_exit(); + + kvm_ioctl(kvm_fd, KVM_GET_SUPPORTED_HV_CPUID, cpuid); + + close(kvm_fd); + return cpuid; +} + +void vcpu_set_hv_cpuid(struct kvm_vcpu *vcpu) +{ + static struct kvm_cpuid2 *cpuid_full; + const struct kvm_cpuid2 *cpuid_sys, *cpuid_hv; + int i, nent = 0; + + if (!cpuid_full) { + cpuid_sys = kvm_get_supported_cpuid(); + cpuid_hv = kvm_get_supported_hv_cpuid(); + + cpuid_full = allocate_kvm_cpuid2(cpuid_sys->nent + cpuid_hv->nent); + if (!cpuid_full) { + perror("malloc"); + abort(); + } + + /* Need to skip KVM CPUID leaves 0x400000xx */ + for (i = 0; i < cpuid_sys->nent; i++) { + if (cpuid_sys->entries[i].function >= 0x40000000 && + cpuid_sys->entries[i].function < 0x40000100) + continue; + cpuid_full->entries[nent] = cpuid_sys->entries[i]; + nent++; + } + + memcpy(&cpuid_full->entries[nent], cpuid_hv->entries, + cpuid_hv->nent * sizeof(struct kvm_cpuid_entry2)); + cpuid_full->nent = nent + cpuid_hv->nent; + } + + vcpu_init_cpuid(vcpu, cpuid_full); +} + +const struct kvm_cpuid2 *vcpu_get_supported_hv_cpuid(struct kvm_vcpu *vcpu) +{ + struct kvm_cpuid2 *cpuid = allocate_kvm_cpuid2(MAX_NR_CPUID_ENTRIES); + + vcpu_ioctl(vcpu, KVM_GET_SUPPORTED_HV_CPUID, cpuid); + + return cpuid; +} + +unsigned long vm_compute_max_gfn(struct kvm_vm *vm) +{ + const unsigned long num_ht_pages = 12 << (30 - vm->page_shift); /* 12 GiB */ + unsigned long ht_gfn, max_gfn, max_pfn; + uint8_t maxphyaddr; + + max_gfn = (1ULL << (vm->pa_bits - vm->page_shift)) - 1; + + /* Avoid reserved HyperTransport region on AMD processors. */ + if (!host_cpu_is_amd) + return max_gfn; + + /* On parts with <40 physical address bits, the area is fully hidden */ + if (vm->pa_bits < 40) + return max_gfn; + + /* Before family 17h, the HyperTransport area is just below 1T. */ + ht_gfn = (1 << 28) - num_ht_pages; + if (this_cpu_family() < 0x17) + goto done; + + /* + * Otherwise it's at the top of the physical address space, possibly + * reduced due to SME by bits 11:6 of CPUID[0x8000001f].EBX. Use + * the old conservative value if MAXPHYADDR is not enumerated. + */ + if (!this_cpu_has_p(X86_PROPERTY_MAX_PHY_ADDR)) + goto done; + + maxphyaddr = this_cpu_property(X86_PROPERTY_MAX_PHY_ADDR); + max_pfn = (1ULL << (maxphyaddr - vm->page_shift)) - 1; + + if (this_cpu_has_p(X86_PROPERTY_PHYS_ADDR_REDUCTION)) + max_pfn >>= this_cpu_property(X86_PROPERTY_PHYS_ADDR_REDUCTION); + + ht_gfn = max_pfn - num_ht_pages; +done: + return min(max_gfn, ht_gfn - 1); +} + +/* Returns true if kvm_intel was loaded with unrestricted_guest=1. */ +bool vm_is_unrestricted_guest(struct kvm_vm *vm) +{ + /* Ensure that a KVM vendor-specific module is loaded. 
*/ + if (vm == NULL) + close(open_kvm_dev_path_or_exit()); + + return get_kvm_intel_param_bool("unrestricted_guest"); +} + +void kvm_selftest_arch_init(void) +{ + host_cpu_is_intel = this_cpu_is_intel(); + host_cpu_is_amd = this_cpu_is_amd(); +} diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c new file mode 100644 index 0000000000..5495a92dfd --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tools/testing/selftests/kvm/lib/x86_64/svm.c + * Helpers used for nested SVM testing + * Largely inspired from KVM unit test svm.c + * + * Copyright (C) 2020, Red Hat, Inc. + */ + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" +#include "svm_util.h" + +#define SEV_DEV_PATH "/dev/sev" + +struct gpr64_regs guest_regs; +u64 rflags; + +/* Allocate memory regions for nested SVM tests. + * + * Input Args: + * vm - The VM to allocate guest-virtual addresses in. + * + * Output Args: + * p_svm_gva - The guest virtual address for the struct svm_test_data. + * + * Return: + * Pointer to structure with the addresses of the SVM areas. + */ +struct svm_test_data * +vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva) +{ + vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm); + struct svm_test_data *svm = addr_gva2hva(vm, svm_gva); + + svm->vmcb = (void *)vm_vaddr_alloc_page(vm); + svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb); + svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb); + + svm->save_area = (void *)vm_vaddr_alloc_page(vm); + svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area); + svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area); + + svm->msr = (void *)vm_vaddr_alloc_page(vm); + svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr); + svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr); + memset(svm->msr_hva, 0, getpagesize()); + + *p_svm_gva = svm_gva; + return svm; +} + +static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector, + u64 base, u32 limit, u32 attr) +{ + seg->selector = selector; + seg->attrib = attr; + seg->limit = limit; + seg->base = base; +} + +void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp) +{ + struct vmcb *vmcb = svm->vmcb; + uint64_t vmcb_gpa = svm->vmcb_gpa; + struct vmcb_save_area *save = &vmcb->save; + struct vmcb_control_area *ctrl = &vmcb->control; + u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK + | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK; + u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK + | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK; + uint64_t efer; + + efer = rdmsr(MSR_EFER); + wrmsr(MSR_EFER, efer | EFER_SVME); + wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa); + + memset(vmcb, 0, sizeof(*vmcb)); + asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory"); + vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr); + vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr); + vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr); + vmcb_set_seg(&save->ds, get_ds(), 0, -1U, data_seg_attr); + vmcb_set_seg(&save->gdtr, 0, get_gdt().address, get_gdt().size, 0); + vmcb_set_seg(&save->idtr, 0, get_idt().address, get_idt().size, 0); + + ctrl->asid = 1; + save->cpl = 0; + save->efer = rdmsr(MSR_EFER); + asm volatile ("mov %%cr4, %0" : "=r"(save->cr4) : : "memory"); + asm volatile ("mov %%cr3, %0" : "=r"(save->cr3) : : "memory"); + asm volatile ("mov %%cr0, %0" : "=r"(save->cr0) : : "memory"); + asm volatile 
("mov %%dr7, %0" : "=r"(save->dr7) : : "memory"); + asm volatile ("mov %%dr6, %0" : "=r"(save->dr6) : : "memory"); + asm volatile ("mov %%cr2, %0" : "=r"(save->cr2) : : "memory"); + save->g_pat = rdmsr(MSR_IA32_CR_PAT); + save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR); + ctrl->intercept = (1ULL << INTERCEPT_VMRUN) | + (1ULL << INTERCEPT_VMMCALL); + ctrl->msrpm_base_pa = svm->msr_gpa; + + vmcb->save.rip = (u64)guest_rip; + vmcb->save.rsp = (u64)guest_rsp; + guest_regs.rdi = (u64)svm; +} + +/* + * save/restore 64-bit general registers except rax, rip, rsp + * which are directly handed through the VMCB guest processor state + */ +#define SAVE_GPR_C \ + "xchg %%rbx, guest_regs+0x20\n\t" \ + "xchg %%rcx, guest_regs+0x10\n\t" \ + "xchg %%rdx, guest_regs+0x18\n\t" \ + "xchg %%rbp, guest_regs+0x30\n\t" \ + "xchg %%rsi, guest_regs+0x38\n\t" \ + "xchg %%rdi, guest_regs+0x40\n\t" \ + "xchg %%r8, guest_regs+0x48\n\t" \ + "xchg %%r9, guest_regs+0x50\n\t" \ + "xchg %%r10, guest_regs+0x58\n\t" \ + "xchg %%r11, guest_regs+0x60\n\t" \ + "xchg %%r12, guest_regs+0x68\n\t" \ + "xchg %%r13, guest_regs+0x70\n\t" \ + "xchg %%r14, guest_regs+0x78\n\t" \ + "xchg %%r15, guest_regs+0x80\n\t" + +#define LOAD_GPR_C SAVE_GPR_C + +/* + * selftests do not use interrupts so we dropped clgi/sti/cli/stgi + * for now. registers involved in LOAD/SAVE_GPR_C are eventually + * unmodified so they do not need to be in the clobber list. + */ +void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa) +{ + asm volatile ( + "vmload %[vmcb_gpa]\n\t" + "mov rflags, %%r15\n\t" // rflags + "mov %%r15, 0x170(%[vmcb])\n\t" + "mov guest_regs, %%r15\n\t" // rax + "mov %%r15, 0x1f8(%[vmcb])\n\t" + LOAD_GPR_C + "vmrun %[vmcb_gpa]\n\t" + SAVE_GPR_C + "mov 0x170(%[vmcb]), %%r15\n\t" // rflags + "mov %%r15, rflags\n\t" + "mov 0x1f8(%[vmcb]), %%r15\n\t" // rax + "mov %%r15, guest_regs\n\t" + "vmsave %[vmcb_gpa]\n\t" + : : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa) + : "r15", "memory"); +} + +/* + * Open SEV_DEV_PATH if available, otherwise exit the entire program. + * + * Return: + * The opened file descriptor of /dev/sev. + */ +int open_sev_dev_path_or_exit(void) +{ + return open_path_or_exit(SEV_DEV_PATH, 0); +} diff --git a/tools/testing/selftests/kvm/lib/x86_64/ucall.c b/tools/testing/selftests/kvm/lib/x86_64/ucall.c new file mode 100644 index 0000000000..1265cecc7d --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/ucall.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ucall support. A ucall is a "hypercall to userspace". + * + * Copyright (C) 2018, Red Hat, Inc. + */ +#include "kvm_util.h" + +#define UCALL_PIO_PORT ((uint16_t)0x1000) + +void ucall_arch_do_ucall(vm_vaddr_t uc) +{ + /* + * FIXME: Revert this hack (the entire commit that added it) once nVMX + * preserves L2 GPRs across a nested VM-Exit. If a ucall from L2, e.g. + * to do a GUEST_SYNC(), lands the vCPU in L1, any and all GPRs can be + * clobbered by L1. Save and restore non-volatile GPRs (clobbering RBP + * in particular is problematic) along with RDX and RDI (which are + * inputs), and clobber volatile GPRs. 
*sigh* + */ +#define HORRIFIC_L2_UCALL_CLOBBER_HACK \ + "rcx", "rsi", "r8", "r9", "r10", "r11" + + asm volatile("push %%rbp\n\t" + "push %%r15\n\t" + "push %%r14\n\t" + "push %%r13\n\t" + "push %%r12\n\t" + "push %%rbx\n\t" + "push %%rdx\n\t" + "push %%rdi\n\t" + "in %[port], %%al\n\t" + "pop %%rdi\n\t" + "pop %%rdx\n\t" + "pop %%rbx\n\t" + "pop %%r12\n\t" + "pop %%r13\n\t" + "pop %%r14\n\t" + "pop %%r15\n\t" + "pop %%rbp\n\t" + : : [port] "d" (UCALL_PIO_PORT), "D" (uc) : "rax", "memory", + HORRIFIC_L2_UCALL_CLOBBER_HACK); +} + +void *ucall_arch_get_ucall(struct kvm_vcpu *vcpu) +{ + struct kvm_run *run = vcpu->run; + + if (run->exit_reason == KVM_EXIT_IO && run->io.port == UCALL_PIO_PORT) { + struct kvm_regs regs; + + vcpu_regs_get(vcpu, ®s); + return (void *)regs.rdi; + } + return NULL; +} diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c new file mode 100644 index 0000000000..59d97531c9 --- /dev/null +++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c @@ -0,0 +1,554 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * tools/testing/selftests/kvm/lib/x86_64/vmx.c + * + * Copyright (C) 2018, Google LLC. + */ + +#include <asm/msr-index.h> + +#include "test_util.h" +#include "kvm_util.h" +#include "processor.h" +#include "vmx.h" + +#define PAGE_SHIFT_4K 12 + +#define KVM_EPT_PAGE_TABLE_MIN_PADDR 0x1c0000 + +bool enable_evmcs; + +struct hv_enlightened_vmcs *current_evmcs; +struct hv_vp_assist_page *current_vp_assist; + +struct eptPageTableEntry { + uint64_t readable:1; + uint64_t writable:1; + uint64_t executable:1; + uint64_t memory_type:3; + uint64_t ignore_pat:1; + uint64_t page_size:1; + uint64_t accessed:1; + uint64_t dirty:1; + uint64_t ignored_11_10:2; + uint64_t address:40; + uint64_t ignored_62_52:11; + uint64_t suppress_ve:1; +}; + +struct eptPageTablePointer { + uint64_t memory_type:3; + uint64_t page_walk_length:3; + uint64_t ad_enabled:1; + uint64_t reserved_11_07:5; + uint64_t address:40; + uint64_t reserved_63_52:12; +}; +int vcpu_enable_evmcs(struct kvm_vcpu *vcpu) +{ + uint16_t evmcs_ver; + + vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENLIGHTENED_VMCS, + (unsigned long)&evmcs_ver); + + /* KVM should return supported EVMCS version range */ + TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) && + (evmcs_ver & 0xff) > 0, + "Incorrect EVMCS version range: %x:%x\n", + evmcs_ver & 0xff, evmcs_ver >> 8); + + return evmcs_ver; +} + +/* Allocate memory regions for nested VMX tests. + * + * Input Args: + * vm - The VM to allocate guest-virtual addresses in. + * + * Output Args: + * p_vmx_gva - The guest virtual address for the struct vmx_pages. + * + * Return: + * Pointer to structure with the addresses of the VMX areas. + */ +struct vmx_pages * +vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva) +{ + vm_vaddr_t vmx_gva = vm_vaddr_alloc_page(vm); + struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva); + + /* Setup of a region of guest memory for the vmxon region. */ + vmx->vmxon = (void *)vm_vaddr_alloc_page(vm); + vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon); + vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon); + + /* Setup of a region of guest memory for a vmcs. */ + vmx->vmcs = (void *)vm_vaddr_alloc_page(vm); + vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs); + vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs); + + /* Setup of a region of guest memory for the MSR bitmap. 
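 * This page, like the others below, is handed to the guest by address; the
 * usual consumption pattern (mirroring memstress_setup_nested() earlier in
 * this series) is:
 *
 *	vmx = vcpu_alloc_vmx(vm, &vmx_gva);
 *	vcpu_args_set(vcpu, 1, vmx_gva);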
*/ + vmx->msr = (void *)vm_vaddr_alloc_page(vm); + vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr); + vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr); + memset(vmx->msr_hva, 0, getpagesize()); + + /* Setup of a region of guest memory for the shadow VMCS. */ + vmx->shadow_vmcs = (void *)vm_vaddr_alloc_page(vm); + vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs); + vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs); + + /* Setup of a region of guest memory for the VMREAD and VMWRITE bitmaps. */ + vmx->vmread = (void *)vm_vaddr_alloc_page(vm); + vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread); + vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread); + memset(vmx->vmread_hva, 0, getpagesize()); + + vmx->vmwrite = (void *)vm_vaddr_alloc_page(vm); + vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite); + vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite); + memset(vmx->vmwrite_hva, 0, getpagesize()); + + *p_vmx_gva = vmx_gva; + return vmx; +} + +bool prepare_for_vmx_operation(struct vmx_pages *vmx) +{ + uint64_t feature_control; + uint64_t required; + unsigned long cr0; + unsigned long cr4; + + /* + * Ensure bits in CR0 and CR4 are valid in VMX operation: + * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx. + * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx. + */ + __asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory"); + cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1); + cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0); + __asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory"); + + __asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory"); + cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1); + cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0); + /* Enable VMX operation */ + cr4 |= X86_CR4_VMXE; + __asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory"); + + /* + * Configure IA32_FEATURE_CONTROL MSR to allow VMXON: + * Bit 0: Lock bit. If clear, VMXON causes a #GP. + * Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON + * outside of SMX causes a #GP. + */ + required = FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX; + required |= FEAT_CTL_LOCKED; + feature_control = rdmsr(MSR_IA32_FEAT_CTL); + if ((feature_control & required) != required) + wrmsr(MSR_IA32_FEAT_CTL, feature_control | required); + + /* Enter VMX root operation. */ + *(uint32_t *)(vmx->vmxon) = vmcs_revision(); + if (vmxon(vmx->vmxon_gpa)) + return false; + + return true; +} + +bool load_vmcs(struct vmx_pages *vmx) +{ + /* Load a VMCS. */ + *(uint32_t *)(vmx->vmcs) = vmcs_revision(); + if (vmclear(vmx->vmcs_gpa)) + return false; + + if (vmptrld(vmx->vmcs_gpa)) + return false; + + /* Setup shadow VMCS, do not load it yet. */ + *(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul; + if (vmclear(vmx->shadow_vmcs_gpa)) + return false; + + return true; +} + +static bool ept_vpid_cap_supported(uint64_t mask) +{ + return rdmsr(MSR_IA32_VMX_EPT_VPID_CAP) & mask; +} + +bool ept_1g_pages_supported(void) +{ + return ept_vpid_cap_supported(VMX_EPT_VPID_CAP_1G_PAGES); +} + +/* + * Initialize the control fields to the most basic settings possible. 
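/*
 * Host-side scaffolding that pairs with vcpu_alloc_vmx() above; a
 * minimal sketch in which l1_guest_code() is the assumed L1 entry
 * point and the run loop is the usual get_ucall() loop.
 */
static void run_nested_vmx_test(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	vm_vaddr_t vmx_gva;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);

	/* Allocate vmxon/vmcs/bitmap pages and hand their GVA to L1. */
	vcpu_alloc_vmx(vm, &vmx_gva);
	vcpu_args_set(vcpu, 1, vmx_gva);

	/* ... run the vCPU and service ucalls as sketched earlier ... */

	kvm_vm_free(vm);
}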
+ */ +static inline void init_vmcs_control_fields(struct vmx_pages *vmx) +{ + uint32_t sec_exec_ctl = 0; + + vmwrite(VIRTUAL_PROCESSOR_ID, 0); + vmwrite(POSTED_INTR_NV, 0); + + vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS)); + + if (vmx->eptp_gpa) { + uint64_t ept_paddr; + struct eptPageTablePointer eptp = { + .memory_type = VMX_BASIC_MEM_TYPE_WB, + .page_walk_length = 3, /* + 1 */ + .ad_enabled = ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS), + .address = vmx->eptp_gpa >> PAGE_SHIFT_4K, + }; + + memcpy(&ept_paddr, &eptp, sizeof(ept_paddr)); + vmwrite(EPT_POINTER, ept_paddr); + sec_exec_ctl |= SECONDARY_EXEC_ENABLE_EPT; + } + + if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, sec_exec_ctl)) + vmwrite(CPU_BASED_VM_EXEC_CONTROL, + rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS); + else { + vmwrite(CPU_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS)); + GUEST_ASSERT(!sec_exec_ctl); + } + + vmwrite(EXCEPTION_BITMAP, 0); + vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0); + vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */ + vmwrite(CR3_TARGET_COUNT, 0); + vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) | + VM_EXIT_HOST_ADDR_SPACE_SIZE); /* 64-bit host */ + vmwrite(VM_EXIT_MSR_STORE_COUNT, 0); + vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0); + vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) | + VM_ENTRY_IA32E_MODE); /* 64-bit guest */ + vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0); + vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0); + vmwrite(TPR_THRESHOLD, 0); + + vmwrite(CR0_GUEST_HOST_MASK, 0); + vmwrite(CR4_GUEST_HOST_MASK, 0); + vmwrite(CR0_READ_SHADOW, get_cr0()); + vmwrite(CR4_READ_SHADOW, get_cr4()); + + vmwrite(MSR_BITMAP, vmx->msr_gpa); + vmwrite(VMREAD_BITMAP, vmx->vmread_gpa); + vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa); +} + +/* + * Initialize the host state fields based on the current host state, with + * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch + * or vmresume. + */ +static inline void init_vmcs_host_state(void) +{ + uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS); + + vmwrite(HOST_ES_SELECTOR, get_es()); + vmwrite(HOST_CS_SELECTOR, get_cs()); + vmwrite(HOST_SS_SELECTOR, get_ss()); + vmwrite(HOST_DS_SELECTOR, get_ds()); + vmwrite(HOST_FS_SELECTOR, get_fs()); + vmwrite(HOST_GS_SELECTOR, get_gs()); + vmwrite(HOST_TR_SELECTOR, get_tr()); + + if (exit_controls & VM_EXIT_LOAD_IA32_PAT) + vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT)); + if (exit_controls & VM_EXIT_LOAD_IA32_EFER) + vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER)); + if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) + vmwrite(HOST_IA32_PERF_GLOBAL_CTRL, + rdmsr(MSR_CORE_PERF_GLOBAL_CTRL)); + + vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS)); + + vmwrite(HOST_CR0, get_cr0()); + vmwrite(HOST_CR3, get_cr3()); + vmwrite(HOST_CR4, get_cr4()); + vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE)); + vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE)); + vmwrite(HOST_TR_BASE, + get_desc64_base((struct desc64 *)(get_gdt().address + get_tr()))); + vmwrite(HOST_GDTR_BASE, get_gdt().address); + vmwrite(HOST_IDTR_BASE, get_idt().address); + vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP)); + vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP)); +} + +/* + * Initialize the guest state fields essentially as a clone of + * the host state fields. Some host state fields have fixed + * values, and we set the corresponding guest state fields accordingly. 
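/*
 * For reference, the EPT pointer assembled from struct
 * eptPageTablePointer in init_vmcs_control_fields() is equivalent to
 * the open-coded encoding below (Intel SDM EPTP layout); shown only to
 * make the bit positions explicit, not used elsewhere in this file.
 */
static inline uint64_t encode_eptp(struct vmx_pages *vmx)
{
	return (vmx->eptp_gpa & ~(uint64_t)0xfff) |	/* bits 51:12  PML4 base       */
	       (VMX_BASIC_MEM_TYPE_WB << 0) |		/* bits  2:0   WB memory type  */
	       (3ULL << 3) |				/* bits  5:3   walk length - 1 */
	       (ept_vpid_cap_supported(VMX_EPT_VPID_CAP_AD_BITS) ?
		(1ULL << 6) : 0);			/* bit   6     A/D enable      */
}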
+ */ +static inline void init_vmcs_guest_state(void *rip, void *rsp) +{ + vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR)); + vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR)); + vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR)); + vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR)); + vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR)); + vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR)); + vmwrite(GUEST_LDTR_SELECTOR, 0); + vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR)); + vmwrite(GUEST_INTR_STATUS, 0); + vmwrite(GUEST_PML_INDEX, 0); + + vmwrite(VMCS_LINK_POINTER, -1ll); + vmwrite(GUEST_IA32_DEBUGCTL, 0); + vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT)); + vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER)); + vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL, + vmreadz(HOST_IA32_PERF_GLOBAL_CTRL)); + + vmwrite(GUEST_ES_LIMIT, -1); + vmwrite(GUEST_CS_LIMIT, -1); + vmwrite(GUEST_SS_LIMIT, -1); + vmwrite(GUEST_DS_LIMIT, -1); + vmwrite(GUEST_FS_LIMIT, -1); + vmwrite(GUEST_GS_LIMIT, -1); + vmwrite(GUEST_LDTR_LIMIT, -1); + vmwrite(GUEST_TR_LIMIT, 0x67); + vmwrite(GUEST_GDTR_LIMIT, 0xffff); + vmwrite(GUEST_IDTR_LIMIT, 0xffff); + vmwrite(GUEST_ES_AR_BYTES, + vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093); + vmwrite(GUEST_CS_AR_BYTES, 0xa09b); + vmwrite(GUEST_SS_AR_BYTES, 0xc093); + vmwrite(GUEST_DS_AR_BYTES, + vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093); + vmwrite(GUEST_FS_AR_BYTES, + vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093); + vmwrite(GUEST_GS_AR_BYTES, + vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093); + vmwrite(GUEST_LDTR_AR_BYTES, 0x10000); + vmwrite(GUEST_TR_AR_BYTES, 0x8b); + vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0); + vmwrite(GUEST_ACTIVITY_STATE, 0); + vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS)); + vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0); + + vmwrite(GUEST_CR0, vmreadz(HOST_CR0)); + vmwrite(GUEST_CR3, vmreadz(HOST_CR3)); + vmwrite(GUEST_CR4, vmreadz(HOST_CR4)); + vmwrite(GUEST_ES_BASE, 0); + vmwrite(GUEST_CS_BASE, 0); + vmwrite(GUEST_SS_BASE, 0); + vmwrite(GUEST_DS_BASE, 0); + vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE)); + vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE)); + vmwrite(GUEST_LDTR_BASE, 0); + vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE)); + vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE)); + vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE)); + vmwrite(GUEST_DR7, 0x400); + vmwrite(GUEST_RSP, (uint64_t)rsp); + vmwrite(GUEST_RIP, (uint64_t)rip); + vmwrite(GUEST_RFLAGS, 2); + vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0); + vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP)); + vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP)); +} + +void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp) +{ + init_vmcs_control_fields(vmx); + init_vmcs_host_state(); + init_vmcs_guest_state(guest_rip, guest_rsp); +} + +static void nested_create_pte(struct kvm_vm *vm, + struct eptPageTableEntry *pte, + uint64_t nested_paddr, + uint64_t paddr, + int current_level, + int target_level) +{ + if (!pte->readable) { + pte->writable = true; + pte->readable = true; + pte->executable = true; + pte->page_size = (current_level == target_level); + if (pte->page_size) + pte->address = paddr >> vm->page_shift; + else + pte->address = vm_alloc_page_table(vm) >> vm->page_shift; + } else { + /* + * Entry already present. Assert that the caller doesn't want + * a hugepage at this level, and that there isn't a hugepage at + * this level. 
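/*
 * Putting prepare_for_vmx_operation(), load_vmcs() and prepare_vmcs()
 * together: the canonical L1 bring-up used by the nested VMX tests. A
 * minimal sketch assuming an l2_guest_code() that executes VMCALL.
 */
#define L2_GUEST_STACK_SIZE 64

static void l1_guest_code(struct vmx_pages *vmx_pages)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Control, host and guest state exactly as initialized above. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_DONE();
}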
+ */ + TEST_ASSERT(current_level != target_level, + "Cannot create hugepage at level: %u, nested_paddr: 0x%lx\n", + current_level, nested_paddr); + TEST_ASSERT(!pte->page_size, + "Cannot create page table at level: %u, nested_paddr: 0x%lx\n", + current_level, nested_paddr); + } +} + + +void __nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr, int target_level) +{ + const uint64_t page_size = PG_LEVEL_SIZE(target_level); + struct eptPageTableEntry *pt = vmx->eptp_hva, *pte; + uint16_t index; + + TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use " + "unknown or unsupported guest mode, mode: 0x%x", vm->mode); + + TEST_ASSERT((nested_paddr >> 48) == 0, + "Nested physical address 0x%lx requires 5-level paging", + nested_paddr); + TEST_ASSERT((nested_paddr % page_size) == 0, + "Nested physical address not on page boundary,\n" + " nested_paddr: 0x%lx page_size: 0x%lx", + nested_paddr, page_size); + TEST_ASSERT((nested_paddr >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond beyond maximum supported,\n" + " nested_paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + paddr, vm->max_gfn, vm->page_size); + TEST_ASSERT((paddr % page_size) == 0, + "Physical address not on page boundary,\n" + " paddr: 0x%lx page_size: 0x%lx", + paddr, page_size); + TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn, + "Physical address beyond beyond maximum supported,\n" + " paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x", + paddr, vm->max_gfn, vm->page_size); + + for (int level = PG_LEVEL_512G; level >= PG_LEVEL_4K; level--) { + index = (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu; + pte = &pt[index]; + + nested_create_pte(vm, pte, nested_paddr, paddr, level, target_level); + + if (pte->page_size) + break; + + pt = addr_gpa2hva(vm, pte->address * vm->page_size); + } + + /* + * For now mark these as accessed and dirty because the only + * testcase we have needs that. Can be reconsidered later. + */ + pte->accessed = true; + pte->dirty = true; + +} + +void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr) +{ + __nested_pg_map(vmx, vm, nested_paddr, paddr, PG_LEVEL_4K); +} + +/* + * Map a range of EPT guest physical addresses to the VM's physical address + * + * Input Args: + * vm - Virtual Machine + * nested_paddr - Nested guest physical address to map + * paddr - VM Physical Address + * size - The size of the range to map + * level - The level at which to map the range + * + * Output Args: None + * + * Return: None + * + * Within the VM given by vm, creates a nested guest translation for the + * page range starting at nested_paddr to the page range starting at paddr. + */ +void __nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr, uint64_t size, + int level) +{ + size_t page_size = PG_LEVEL_SIZE(level); + size_t npages = size / page_size; + + TEST_ASSERT(nested_paddr + size > nested_paddr, "Vaddr overflow"); + TEST_ASSERT(paddr + size > paddr, "Paddr overflow"); + + while (npages--) { + __nested_pg_map(vmx, vm, nested_paddr, paddr, level); + nested_paddr += page_size; + paddr += page_size; + } +} + +void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t nested_paddr, uint64_t paddr, uint64_t size) +{ + __nested_map(vmx, vm, nested_paddr, paddr, size, PG_LEVEL_4K); +} + +/* Prepare an identity extended page table that maps all the + * physical pages in VM. 
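/*
 * For reference, the walk above pulls each table index out of the
 * nested GPA with PG_LEVEL_SHIFT(level) = 12 + 9 * (level - 1), i.e.
 * for a 4 KiB mapping:
 *
 *   PG_LEVEL_512G -> GPA bits 47:39
 *   PG_LEVEL_1G   -> GPA bits 38:30
 *   PG_LEVEL_2M   -> GPA bits 29:21
 *   PG_LEVEL_4K   -> GPA bits 20:12
 *
 * A sketch of the same index math as a standalone helper:
 */
static inline uint16_t ept_index(uint64_t nested_paddr, int level)
{
	return (nested_paddr >> PG_LEVEL_SHIFT(level)) & 0x1ffu;
}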
+ */ +void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm, + uint32_t memslot) +{ + sparsebit_idx_t i, last; + struct userspace_mem_region *region = + memslot2region(vm, memslot); + + i = (region->region.guest_phys_addr >> vm->page_shift) - 1; + last = i + (region->region.memory_size >> vm->page_shift); + for (;;) { + i = sparsebit_next_clear(region->unused_phy_pages, i); + if (i > last) + break; + + nested_map(vmx, vm, + (uint64_t)i << vm->page_shift, + (uint64_t)i << vm->page_shift, + 1 << vm->page_shift); + } +} + +/* Identity map a region with 1GiB Pages. */ +void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm, + uint64_t addr, uint64_t size) +{ + __nested_map(vmx, vm, addr, addr, size, PG_LEVEL_1G); +} + +bool kvm_cpu_has_ept(void) +{ + uint64_t ctrl; + + ctrl = kvm_get_feature_msr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) >> 32; + if (!(ctrl & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) + return false; + + ctrl = kvm_get_feature_msr(MSR_IA32_VMX_PROCBASED_CTLS2) >> 32; + return ctrl & SECONDARY_EXEC_ENABLE_EPT; +} + +void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm, + uint32_t eptp_memslot) +{ + TEST_ASSERT(kvm_cpu_has_ept(), "KVM doesn't support nested EPT"); + + vmx->eptp = (void *)vm_vaddr_alloc_page(vm); + vmx->eptp_hva = addr_gva2hva(vm, (uintptr_t)vmx->eptp); + vmx->eptp_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->eptp); +} + +void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm) +{ + vmx->apic_access = (void *)vm_vaddr_alloc_page(vm); + vmx->apic_access_hva = addr_gva2hva(vm, (uintptr_t)vmx->apic_access); + vmx->apic_access_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->apic_access); +} |
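/*
 * Typical host-side combination of the EPT helpers above (cf. the
 * nested dirty-log test); a minimal sketch in which vm/vcpu already
 * exist and NESTED_TEST_MEM/GUEST_TEST_MEM are test-defined addresses.
 */
static void setup_nested_ept(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct vmx_pages *vmx;
	vm_vaddr_t vmx_gva;

	/* Skip (rather than fail, as prepare_eptp() would) without EPT. */
	TEST_REQUIRE(kvm_cpu_has_ept());

	vmx = vcpu_alloc_vmx(vm, &vmx_gva);

	/* Allocate the EPT PML4 and identity-map everything in slot 0. */
	prepare_eptp(vmx, vm, 0);
	nested_map_memslot(vmx, vm, 0);

	/* Alias an extra 4 KiB range, e.g. for dirty-log style tests. */
	nested_map(vmx, vm, NESTED_TEST_MEM, GUEST_TEST_MEM, 0x1000);

	vcpu_args_set(vcpu, 1, vmx_gva);
}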