Diffstat (limited to 'drivers/virt/nitro_enclaves')
-rw-r--r-- | drivers/virt/nitro_enclaves/Kconfig | 26
-rw-r--r-- | drivers/virt/nitro_enclaves/Makefile | 9
-rw-r--r-- | drivers/virt/nitro_enclaves/ne_misc_dev.c | 1783
-rw-r--r-- | drivers/virt/nitro_enclaves/ne_misc_dev.h | 109
-rw-r--r-- | drivers/virt/nitro_enclaves/ne_misc_dev_test.c | 154
-rw-r--r-- | drivers/virt/nitro_enclaves/ne_pci_dev.c | 626
-rw-r--r-- | drivers/virt/nitro_enclaves/ne_pci_dev.h | 331
7 files changed, 3038 insertions, 0 deletions
diff --git a/drivers/virt/nitro_enclaves/Kconfig b/drivers/virt/nitro_enclaves/Kconfig new file mode 100644 index 000000000..dc4d25c26 --- /dev/null +++ b/drivers/virt/nitro_enclaves/Kconfig @@ -0,0 +1,26 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +# Amazon Nitro Enclaves (NE) support. +# Nitro is a hypervisor that has been developed by Amazon. + +config NITRO_ENCLAVES + tristate "Nitro Enclaves Support" + depends on (ARM64 || X86) && HOTPLUG_CPU && PCI && SMP + help + This driver consists of support for enclave lifetime management + for Nitro Enclaves (NE). + + To compile this driver as a module, choose M here. + The module will be called nitro_enclaves. + +config NITRO_ENCLAVES_MISC_DEV_TEST + bool "Tests for the misc device functionality of the Nitro Enclaves" if !KUNIT_ALL_TESTS + depends on NITRO_ENCLAVES && KUNIT=y + default KUNIT_ALL_TESTS + help + Enable KUnit tests for the misc device functionality of the Nitro + Enclaves. Select this option only if you will boot the kernel for + the purpose of running unit tests (e.g. under UML or qemu). If + unsure, say N. diff --git a/drivers/virt/nitro_enclaves/Makefile b/drivers/virt/nitro_enclaves/Makefile new file mode 100644 index 000000000..da61260f2 --- /dev/null +++ b/drivers/virt/nitro_enclaves/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + +# Enclave lifetime management support for Nitro Enclaves (NE). + +obj-$(CONFIG_NITRO_ENCLAVES) += nitro_enclaves.o + +nitro_enclaves-y := ne_pci_dev.o ne_misc_dev.o diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.c b/drivers/virt/nitro_enclaves/ne_misc_dev.c new file mode 100644 index 000000000..241b94f62 --- /dev/null +++ b/drivers/virt/nitro_enclaves/ne_misc_dev.c @@ -0,0 +1,1783 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + */ + +/** + * DOC: Enclave lifetime management driver for Nitro Enclaves (NE). + * Nitro is a hypervisor that has been developed by Amazon. + */ + +#include <linux/anon_inodes.h> +#include <linux/capability.h> +#include <linux/cpu.h> +#include <linux/device.h> +#include <linux/file.h> +#include <linux/hugetlb.h> +#include <linux/limits.h> +#include <linux/list.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nitro_enclaves.h> +#include <linux/pci.h> +#include <linux/poll.h> +#include <linux/range.h> +#include <linux/slab.h> +#include <linux/types.h> +#include <uapi/linux/vm_sockets.h> + +#include "ne_misc_dev.h" +#include "ne_pci_dev.h" + +/** + * NE_CPUS_SIZE - Size for max 128 CPUs, for now, in a cpu-list string, comma + * separated. The NE CPU pool includes CPUs from a single NUMA + * node. + */ +#define NE_CPUS_SIZE (512) + +/** + * NE_EIF_LOAD_OFFSET - The offset where to copy the Enclave Image Format (EIF) + * image in enclave memory. + */ +#define NE_EIF_LOAD_OFFSET (8 * 1024UL * 1024UL) + +/** + * NE_MIN_ENCLAVE_MEM_SIZE - The minimum memory size an enclave can be launched + * with. + */ +#define NE_MIN_ENCLAVE_MEM_SIZE (64 * 1024UL * 1024UL) + +/** + * NE_MIN_MEM_REGION_SIZE - The minimum size of an enclave memory region. + */ +#define NE_MIN_MEM_REGION_SIZE (2 * 1024UL * 1024UL) + +/** + * NE_PARENT_VM_CID - The CID for the vsock device of the primary / parent VM. 
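+ * The NE_START_ENCLAVE ioctl rejects the well-known vsock CIDs and this parent VM CID for enclaves, so a started enclave always ends up with its own, distinct CID.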
+ */ +#define NE_PARENT_VM_CID (3) + +static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg); + +static const struct file_operations ne_fops = { + .owner = THIS_MODULE, + .llseek = noop_llseek, + .unlocked_ioctl = ne_ioctl, +}; + +static struct miscdevice ne_misc_dev = { + .minor = MISC_DYNAMIC_MINOR, + .name = "nitro_enclaves", + .fops = &ne_fops, + .mode = 0660, +}; + +struct ne_devs ne_devs = { + .ne_misc_dev = &ne_misc_dev, +}; + +/* + * TODO: Update logic to create new sysfs entries instead of using + * a kernel parameter e.g. if multiple sysfs files needed. + */ +static int ne_set_kernel_param(const char *val, const struct kernel_param *kp); + +static const struct kernel_param_ops ne_cpu_pool_ops = { + .get = param_get_string, + .set = ne_set_kernel_param, +}; + +static char ne_cpus[NE_CPUS_SIZE]; +static struct kparam_string ne_cpus_arg = { + .maxlen = sizeof(ne_cpus), + .string = ne_cpus, +}; + +module_param_cb(ne_cpus, &ne_cpu_pool_ops, &ne_cpus_arg, 0644); +/* https://www.kernel.org/doc/html/latest/admin-guide/kernel-parameters.html#cpu-lists */ +MODULE_PARM_DESC(ne_cpus, "<cpu-list> - CPU pool used for Nitro Enclaves"); + +/** + * struct ne_cpu_pool - CPU pool used for Nitro Enclaves. + * @avail_threads_per_core: Available full CPU cores to be dedicated to + * enclave(s). The cpumasks from the array, indexed + * by core id, contain all the threads from the + * available cores, that are not set for created + * enclave(s). The full CPU cores are part of the + * NE CPU pool. + * @mutex: Mutex for the access to the NE CPU pool. + * @nr_parent_vm_cores : The size of the available threads per core array. + * The total number of CPU cores available on the + * primary / parent VM. + * @nr_threads_per_core: The number of threads that a full CPU core has. + * @numa_node: NUMA node of the CPUs in the pool. + */ +struct ne_cpu_pool { + cpumask_var_t *avail_threads_per_core; + struct mutex mutex; + unsigned int nr_parent_vm_cores; + unsigned int nr_threads_per_core; + int numa_node; +}; + +static struct ne_cpu_pool ne_cpu_pool; + +/** + * struct ne_phys_contig_mem_regions - Contiguous physical memory regions. + * @num: The number of regions that currently has. + * @regions: The array of physical memory regions. + */ +struct ne_phys_contig_mem_regions { + unsigned long num; + struct range *regions; +}; + +/** + * ne_check_enclaves_created() - Verify if at least one enclave has been created. + * @void: No parameters provided. + * + * Context: Process context. + * Return: + * * True if at least one enclave is created. + * * False otherwise. + */ +static bool ne_check_enclaves_created(void) +{ + struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev; + bool ret = false; + + if (!ne_pci_dev) + return ret; + + mutex_lock(&ne_pci_dev->enclaves_list_mutex); + + if (!list_empty(&ne_pci_dev->enclaves_list)) + ret = true; + + mutex_unlock(&ne_pci_dev->enclaves_list_mutex); + + return ret; +} + +/** + * ne_setup_cpu_pool() - Set the NE CPU pool after handling sanity checks such + * as not sharing CPU cores with the primary / parent VM + * or not using CPU 0, which should remain available for + * the primary / parent VM. Offline the CPUs from the + * pool after the checks passed. + * @ne_cpu_list: The CPU list used for setting NE CPU pool. + * + * Context: Process context. + * Return: + * * 0 on success. + * * Negative return value on failure. 
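+ * + * For example (illustrative values), on a parent VM with 8 CPUs and two threads per core, ne_cpus=2-7 is a valid pool: full cores only, a single NUMA node, and CPU 0 plus its sibling left to the parent VM.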
+ */ +static int ne_setup_cpu_pool(const char *ne_cpu_list) +{ + int core_id = -1; + unsigned int cpu = 0; + cpumask_var_t cpu_pool; + unsigned int cpu_sibling = 0; + unsigned int i = 0; + int numa_node = -1; + int rc = -EINVAL; + + if (!zalloc_cpumask_var(&cpu_pool, GFP_KERNEL)) + return -ENOMEM; + + mutex_lock(&ne_cpu_pool.mutex); + + rc = cpulist_parse(ne_cpu_list, cpu_pool); + if (rc < 0) { + pr_err("%s: Error in cpulist parse [rc=%d]\n", ne_misc_dev.name, rc); + + goto free_pool_cpumask; + } + + cpu = cpumask_any(cpu_pool); + if (cpu >= nr_cpu_ids) { + pr_err("%s: No CPUs available in CPU pool\n", ne_misc_dev.name); + + rc = -EINVAL; + + goto free_pool_cpumask; + } + + /* + * Check if the CPUs are online, to further get info about them + * e.g. numa node, core id, siblings. + */ + for_each_cpu(cpu, cpu_pool) + if (cpu_is_offline(cpu)) { + pr_err("%s: CPU %d is offline, has to be online to get its metadata\n", + ne_misc_dev.name, cpu); + + rc = -EINVAL; + + goto free_pool_cpumask; + } + + /* + * Check if the CPUs from the NE CPU pool are from the same NUMA node. + */ + for_each_cpu(cpu, cpu_pool) + if (numa_node < 0) { + numa_node = cpu_to_node(cpu); + if (numa_node < 0) { + pr_err("%s: Invalid NUMA node %d\n", + ne_misc_dev.name, numa_node); + + rc = -EINVAL; + + goto free_pool_cpumask; + } + } else { + if (numa_node != cpu_to_node(cpu)) { + pr_err("%s: CPUs with different NUMA nodes\n", + ne_misc_dev.name); + + rc = -EINVAL; + + goto free_pool_cpumask; + } + } + + /* + * Check if CPU 0 and its siblings are included in the provided CPU pool + * They should remain available for the primary / parent VM. + */ + if (cpumask_test_cpu(0, cpu_pool)) { + pr_err("%s: CPU 0 has to remain available\n", ne_misc_dev.name); + + rc = -EINVAL; + + goto free_pool_cpumask; + } + + for_each_cpu(cpu_sibling, topology_sibling_cpumask(0)) { + if (cpumask_test_cpu(cpu_sibling, cpu_pool)) { + pr_err("%s: CPU sibling %d for CPU 0 is in CPU pool\n", + ne_misc_dev.name, cpu_sibling); + + rc = -EINVAL; + + goto free_pool_cpumask; + } + } + + /* + * Check if CPU siblings are included in the provided CPU pool. The + * expectation is that full CPU cores are made available in the CPU pool + * for enclaves. + */ + for_each_cpu(cpu, cpu_pool) { + for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu)) { + if (!cpumask_test_cpu(cpu_sibling, cpu_pool)) { + pr_err("%s: CPU %d is not in CPU pool\n", + ne_misc_dev.name, cpu_sibling); + + rc = -EINVAL; + + goto free_pool_cpumask; + } + } + } + + /* Calculate the number of threads from a full CPU core. */ + cpu = cpumask_any(cpu_pool); + for_each_cpu(cpu_sibling, topology_sibling_cpumask(cpu)) + ne_cpu_pool.nr_threads_per_core++; + + ne_cpu_pool.nr_parent_vm_cores = nr_cpu_ids / ne_cpu_pool.nr_threads_per_core; + + ne_cpu_pool.avail_threads_per_core = kcalloc(ne_cpu_pool.nr_parent_vm_cores, + sizeof(*ne_cpu_pool.avail_threads_per_core), + GFP_KERNEL); + if (!ne_cpu_pool.avail_threads_per_core) { + rc = -ENOMEM; + + goto free_pool_cpumask; + } + + for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) + if (!zalloc_cpumask_var(&ne_cpu_pool.avail_threads_per_core[i], GFP_KERNEL)) { + rc = -ENOMEM; + + goto free_cores_cpumask; + } + + /* + * Split the NE CPU pool in threads per core to keep the CPU topology + * after offlining the CPUs. 
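+ * Each avail_threads_per_core[core_id] cpumask ends up holding the sibling threads of one donated core.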
+ */ + for_each_cpu(cpu, cpu_pool) { + core_id = topology_core_id(cpu); + if (core_id < 0 || core_id >= ne_cpu_pool.nr_parent_vm_cores) { + pr_err("%s: Invalid core id %d for CPU %d\n", + ne_misc_dev.name, core_id, cpu); + + rc = -EINVAL; + + goto clear_cpumask; + } + + cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id]); + } + + /* + * CPUs that are given to enclave(s) should not be considered online + * by Linux anymore, as the hypervisor will degrade them to floating. + * The physical CPUs (full cores) are carved out of the primary / parent + * VM and given to the enclave VM. The same number of vCPUs would run + * on less pCPUs for the primary / parent VM. + * + * We offline them here, to not degrade performance and expose correct + * topology to Linux and user space. + */ + for_each_cpu(cpu, cpu_pool) { + rc = remove_cpu(cpu); + if (rc != 0) { + pr_err("%s: CPU %d is not offlined [rc=%d]\n", + ne_misc_dev.name, cpu, rc); + + goto online_cpus; + } + } + + free_cpumask_var(cpu_pool); + + ne_cpu_pool.numa_node = numa_node; + + mutex_unlock(&ne_cpu_pool.mutex); + + return 0; + +online_cpus: + for_each_cpu(cpu, cpu_pool) + add_cpu(cpu); +clear_cpumask: + for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) + cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]); +free_cores_cpumask: + for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) + free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]); + kfree(ne_cpu_pool.avail_threads_per_core); +free_pool_cpumask: + free_cpumask_var(cpu_pool); + ne_cpu_pool.nr_parent_vm_cores = 0; + ne_cpu_pool.nr_threads_per_core = 0; + ne_cpu_pool.numa_node = -1; + mutex_unlock(&ne_cpu_pool.mutex); + + return rc; +} + +/** + * ne_teardown_cpu_pool() - Online the CPUs from the NE CPU pool and cleanup the + * CPU pool. + * @void: No parameters provided. + * + * Context: Process context. + */ +static void ne_teardown_cpu_pool(void) +{ + unsigned int cpu = 0; + unsigned int i = 0; + int rc = -EINVAL; + + mutex_lock(&ne_cpu_pool.mutex); + + if (!ne_cpu_pool.nr_parent_vm_cores) { + mutex_unlock(&ne_cpu_pool.mutex); + + return; + } + + for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) { + for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]) { + rc = add_cpu(cpu); + if (rc != 0) + pr_err("%s: CPU %d is not onlined [rc=%d]\n", + ne_misc_dev.name, cpu, rc); + } + + cpumask_clear(ne_cpu_pool.avail_threads_per_core[i]); + + free_cpumask_var(ne_cpu_pool.avail_threads_per_core[i]); + } + + kfree(ne_cpu_pool.avail_threads_per_core); + ne_cpu_pool.nr_parent_vm_cores = 0; + ne_cpu_pool.nr_threads_per_core = 0; + ne_cpu_pool.numa_node = -1; + + mutex_unlock(&ne_cpu_pool.mutex); +} + +/** + * ne_set_kernel_param() - Set the NE CPU pool value via the NE kernel parameter. + * @val: NE CPU pool string value. + * @kp : NE kernel parameter associated with the NE CPU pool. + * + * Context: Process context. + * Return: + * * 0 on success. + * * Negative return value on failure. 
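+ * + * The pool can also be set at runtime, e.g. echo 2-7 > /sys/module/nitro_enclaves/parameters/ne_cpus (illustrative CPU list), as long as no enclave is currently using it.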
+ */ +static int ne_set_kernel_param(const char *val, const struct kernel_param *kp) +{ + char error_val[] = ""; + int rc = -EINVAL; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + + if (ne_check_enclaves_created()) { + pr_err("%s: The CPU pool is used by enclave(s)\n", ne_misc_dev.name); + + return -EPERM; + } + + ne_teardown_cpu_pool(); + + rc = ne_setup_cpu_pool(val); + if (rc < 0) { + pr_err("%s: Error in setup CPU pool [rc=%d]\n", ne_misc_dev.name, rc); + + param_set_copystring(error_val, kp); + + return rc; + } + + rc = param_set_copystring(val, kp); + if (rc < 0) { + pr_err("%s: Error in param set copystring [rc=%d]\n", ne_misc_dev.name, rc); + + ne_teardown_cpu_pool(); + + param_set_copystring(error_val, kp); + + return rc; + } + + return 0; +} + +/** + * ne_donated_cpu() - Check if the provided CPU is already used by the enclave. + * @ne_enclave : Private data associated with the current enclave. + * @cpu: CPU to check if already used. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * True if the provided CPU is already used by the enclave. + * * False otherwise. + */ +static bool ne_donated_cpu(struct ne_enclave *ne_enclave, unsigned int cpu) +{ + if (cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) + return true; + + return false; +} + +/** + * ne_get_unused_core_from_cpu_pool() - Get the id of a full core from the + * NE CPU pool. + * @void: No parameters provided. + * + * Context: Process context. This function is called with the ne_enclave and + * ne_cpu_pool mutexes held. + * Return: + * * Core id. + * * -1 if no CPU core available in the pool. + */ +static int ne_get_unused_core_from_cpu_pool(void) +{ + int core_id = -1; + unsigned int i = 0; + + for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) + if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i])) { + core_id = i; + + break; + } + + return core_id; +} + +/** + * ne_set_enclave_threads_per_core() - Set the threads of the provided core in + * the enclave data structure. + * @ne_enclave : Private data associated with the current enclave. + * @core_id: Core id to get its threads from the NE CPU pool. + * @vcpu_id: vCPU id part of the provided core. + * + * Context: Process context. This function is called with the ne_enclave and + * ne_cpu_pool mutexes held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_set_enclave_threads_per_core(struct ne_enclave *ne_enclave, + int core_id, u32 vcpu_id) +{ + unsigned int cpu = 0; + + if (core_id < 0 && vcpu_id == 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "No CPUs available in NE CPU pool\n"); + + return -NE_ERR_NO_CPUS_AVAIL_IN_POOL; + } + + if (core_id < 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "CPU %d is not in NE CPU pool\n", vcpu_id); + + return -NE_ERR_VCPU_NOT_IN_CPU_POOL; + } + + if (core_id >= ne_enclave->nr_parent_vm_cores) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Invalid core id %d - ne_enclave\n", core_id); + + return -NE_ERR_VCPU_INVALID_CPU_CORE; + } + + for_each_cpu(cpu, ne_cpu_pool.avail_threads_per_core[core_id]) + cpumask_set_cpu(cpu, ne_enclave->threads_per_core[core_id]); + + cpumask_clear(ne_cpu_pool.avail_threads_per_core[core_id]); + + return 0; +} + +/** + * ne_get_cpu_from_cpu_pool() - Get a CPU from the NE CPU pool, either from the + * remaining sibling(s) of a CPU core or the first + * sibling of a new CPU core. + * @ne_enclave : Private data associated with the current enclave. 
+ * @vcpu_id: vCPU to get from the NE CPU pool. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_get_cpu_from_cpu_pool(struct ne_enclave *ne_enclave, u32 *vcpu_id) +{ + int core_id = -1; + unsigned int cpu = 0; + unsigned int i = 0; + int rc = -EINVAL; + + /* + * If previously allocated a thread of a core to this enclave, first + * check remaining sibling(s) for new CPU allocations, so that full + * CPU cores are used for the enclave. + */ + for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) + for_each_cpu(cpu, ne_enclave->threads_per_core[i]) + if (!ne_donated_cpu(ne_enclave, cpu)) { + *vcpu_id = cpu; + + return 0; + } + + mutex_lock(&ne_cpu_pool.mutex); + + /* + * If no remaining siblings, get a core from the NE CPU pool and keep + * track of all the threads in the enclave threads per core data structure. + */ + core_id = ne_get_unused_core_from_cpu_pool(); + + rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, *vcpu_id); + if (rc < 0) + goto unlock_mutex; + + *vcpu_id = cpumask_any(ne_enclave->threads_per_core[core_id]); + + rc = 0; + +unlock_mutex: + mutex_unlock(&ne_cpu_pool.mutex); + + return rc; +} + +/** + * ne_get_vcpu_core_from_cpu_pool() - Get from the NE CPU pool the id of the + * core associated with the provided vCPU. + * @vcpu_id: Provided vCPU id to get its associated core id. + * + * Context: Process context. This function is called with the ne_enclave and + * ne_cpu_pool mutexes held. + * Return: + * * Core id. + * * -1 if the provided vCPU is not in the pool. + */ +static int ne_get_vcpu_core_from_cpu_pool(u32 vcpu_id) +{ + int core_id = -1; + unsigned int i = 0; + + for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) + if (cpumask_test_cpu(vcpu_id, ne_cpu_pool.avail_threads_per_core[i])) { + core_id = i; + + break; + } + + return core_id; +} + +/** + * ne_check_cpu_in_cpu_pool() - Check if the given vCPU is in the available CPUs + * from the pool. + * @ne_enclave : Private data associated with the current enclave. + * @vcpu_id: ID of the vCPU to check if available in the NE CPU pool. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_check_cpu_in_cpu_pool(struct ne_enclave *ne_enclave, u32 vcpu_id) +{ + int core_id = -1; + unsigned int i = 0; + int rc = -EINVAL; + + if (ne_donated_cpu(ne_enclave, vcpu_id)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "CPU %d already used\n", vcpu_id); + + return -NE_ERR_VCPU_ALREADY_USED; + } + + /* + * If previously allocated a thread of a core to this enclave, but not + * the full core, first check remaining sibling(s). + */ + for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) + if (cpumask_test_cpu(vcpu_id, ne_enclave->threads_per_core[i])) + return 0; + + mutex_lock(&ne_cpu_pool.mutex); + + /* + * If no remaining siblings, get from the NE CPU pool the core + * associated with the vCPU and keep track of all the threads in the + * enclave threads per core data structure. + */ + core_id = ne_get_vcpu_core_from_cpu_pool(vcpu_id); + + rc = ne_set_enclave_threads_per_core(ne_enclave, core_id, vcpu_id); + if (rc < 0) + goto unlock_mutex; + + rc = 0; + +unlock_mutex: + mutex_unlock(&ne_cpu_pool.mutex); + + return rc; +} + +/** + * ne_add_vcpu_ioctl() - Add a vCPU to the slot associated with the current + * enclave. 
+ * @ne_enclave : Private data associated with the current enclave. + * @vcpu_id: ID of the CPU to be associated with the given slot, + * apic id on x86. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_add_vcpu_ioctl(struct ne_enclave *ne_enclave, u32 vcpu_id) +{ + struct ne_pci_dev_cmd_reply cmd_reply = {}; + struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev; + int rc = -EINVAL; + struct slot_add_vcpu_req slot_add_vcpu_req = {}; + + if (ne_enclave->mm != current->mm) + return -EIO; + + slot_add_vcpu_req.slot_uid = ne_enclave->slot_uid; + slot_add_vcpu_req.vcpu_id = vcpu_id; + + rc = ne_do_request(pdev, SLOT_ADD_VCPU, + &slot_add_vcpu_req, sizeof(slot_add_vcpu_req), + &cmd_reply, sizeof(cmd_reply)); + if (rc < 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in slot add vCPU [rc=%d]\n", rc); + + return rc; + } + + cpumask_set_cpu(vcpu_id, ne_enclave->vcpu_ids); + + ne_enclave->nr_vcpus++; + + return 0; +} + +/** + * ne_sanity_check_user_mem_region() - Sanity check the user space memory + * region received during the set user + * memory region ioctl call. + * @ne_enclave : Private data associated with the current enclave. + * @mem_region : User space memory region to be sanity checked. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_sanity_check_user_mem_region(struct ne_enclave *ne_enclave, + struct ne_user_memory_region mem_region) +{ + struct ne_mem_region *ne_mem_region = NULL; + + if (ne_enclave->mm != current->mm) + return -EIO; + + if (mem_region.memory_size & (NE_MIN_MEM_REGION_SIZE - 1)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "User space memory size is not multiple of 2 MiB\n"); + + return -NE_ERR_INVALID_MEM_REGION_SIZE; + } + + if (!IS_ALIGNED(mem_region.userspace_addr, NE_MIN_MEM_REGION_SIZE)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "User space address is not 2 MiB aligned\n"); + + return -NE_ERR_UNALIGNED_MEM_REGION_ADDR; + } + + if ((mem_region.userspace_addr & (NE_MIN_MEM_REGION_SIZE - 1)) || + !access_ok((void __user *)(unsigned long)mem_region.userspace_addr, + mem_region.memory_size)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Invalid user space address range\n"); + + return -NE_ERR_INVALID_MEM_REGION_ADDR; + } + + list_for_each_entry(ne_mem_region, &ne_enclave->mem_regions_list, + mem_region_list_entry) { + u64 memory_size = ne_mem_region->memory_size; + u64 userspace_addr = ne_mem_region->userspace_addr; + + if ((userspace_addr <= mem_region.userspace_addr && + mem_region.userspace_addr < (userspace_addr + memory_size)) || + (mem_region.userspace_addr <= userspace_addr && + (mem_region.userspace_addr + mem_region.memory_size) > userspace_addr)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "User space memory region already used\n"); + + return -NE_ERR_MEM_REGION_ALREADY_USED; + } + } + + return 0; +} + +/** + * ne_sanity_check_user_mem_region_page() - Sanity check a page from the user space + * memory region received during the set + * user memory region ioctl call. + * @ne_enclave : Private data associated with the current enclave. + * @mem_region_page: Page from the user space memory region to be sanity checked. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. 
+ * * Negative return value on failure. + */ +static int ne_sanity_check_user_mem_region_page(struct ne_enclave *ne_enclave, + struct page *mem_region_page) +{ + if (!PageHuge(mem_region_page)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Not a hugetlbfs page\n"); + + return -NE_ERR_MEM_NOT_HUGE_PAGE; + } + + if (page_size(mem_region_page) & (NE_MIN_MEM_REGION_SIZE - 1)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Page size not multiple of 2 MiB\n"); + + return -NE_ERR_INVALID_PAGE_SIZE; + } + + if (ne_enclave->numa_node != page_to_nid(mem_region_page)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Page is not from NUMA node %d\n", + ne_enclave->numa_node); + + return -NE_ERR_MEM_DIFFERENT_NUMA_NODE; + } + + return 0; +} + +/** + * ne_sanity_check_phys_mem_region() - Sanity check the start address and the size + * of a physical memory region. + * @phys_mem_region_paddr : Physical start address of the region to be sanity checked. + * @phys_mem_region_size : Length of the region to be sanity checked. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_sanity_check_phys_mem_region(u64 phys_mem_region_paddr, + u64 phys_mem_region_size) +{ + if (phys_mem_region_size & (NE_MIN_MEM_REGION_SIZE - 1)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Physical mem region size is not multiple of 2 MiB\n"); + + return -EINVAL; + } + + if (!IS_ALIGNED(phys_mem_region_paddr, NE_MIN_MEM_REGION_SIZE)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Physical mem region address is not 2 MiB aligned\n"); + + return -EINVAL; + } + + return 0; +} + +/** + * ne_merge_phys_contig_memory_regions() - Add a memory region and merge the adjacent + * regions if they are physically contiguous. + * @phys_contig_regions : Private data associated with the contiguous physical memory regions. + * @page_paddr : Physical start address of the region to be added. + * @page_size : Length of the region to be added. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int +ne_merge_phys_contig_memory_regions(struct ne_phys_contig_mem_regions *phys_contig_regions, + u64 page_paddr, u64 page_size) +{ + unsigned long num = phys_contig_regions->num; + int rc = 0; + + rc = ne_sanity_check_phys_mem_region(page_paddr, page_size); + if (rc < 0) + return rc; + + /* Physically contiguous, just merge */ + if (num && (phys_contig_regions->regions[num - 1].end + 1) == page_paddr) { + phys_contig_regions->regions[num - 1].end += page_size; + } else { + phys_contig_regions->regions[num].start = page_paddr; + phys_contig_regions->regions[num].end = page_paddr + page_size - 1; + phys_contig_regions->num++; + } + + return 0; +} + +/** + * ne_set_user_memory_region_ioctl() - Add user space memory region to the slot + * associated with the current enclave. + * @ne_enclave : Private data associated with the current enclave. + * @mem_region : User space memory region to be associated with the given slot. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. 
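+ * + * The region has to be backed by 2 MiB (or multiple of 2 MiB) huge pages from the same NUMA node as the enclave CPUs; the pinned pages are merged into physically contiguous ranges and handed to the NE PCI device via SLOT_ADD_MEM requests.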
+ */ +static int ne_set_user_memory_region_ioctl(struct ne_enclave *ne_enclave, + struct ne_user_memory_region mem_region) +{ + long gup_rc = 0; + unsigned long i = 0; + unsigned long max_nr_pages = 0; + unsigned long memory_size = 0; + struct ne_mem_region *ne_mem_region = NULL; + struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev; + struct ne_phys_contig_mem_regions phys_contig_mem_regions = {}; + int rc = -EINVAL; + + rc = ne_sanity_check_user_mem_region(ne_enclave, mem_region); + if (rc < 0) + return rc; + + ne_mem_region = kzalloc(sizeof(*ne_mem_region), GFP_KERNEL); + if (!ne_mem_region) + return -ENOMEM; + + max_nr_pages = mem_region.memory_size / NE_MIN_MEM_REGION_SIZE; + + ne_mem_region->pages = kcalloc(max_nr_pages, sizeof(*ne_mem_region->pages), + GFP_KERNEL); + if (!ne_mem_region->pages) { + rc = -ENOMEM; + + goto free_mem_region; + } + + phys_contig_mem_regions.regions = kcalloc(max_nr_pages, + sizeof(*phys_contig_mem_regions.regions), + GFP_KERNEL); + if (!phys_contig_mem_regions.regions) { + rc = -ENOMEM; + + goto free_mem_region; + } + + do { + i = ne_mem_region->nr_pages; + + if (i == max_nr_pages) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Reached max nr of pages in the pages data struct\n"); + + rc = -ENOMEM; + + goto put_pages; + } + + gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1, + ne_mem_region->pages + i, FOLL_GET); + + if (gup_rc < 0) { + rc = gup_rc; + + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in get user pages [rc=%d]\n", rc); + + goto put_pages; + } + + rc = ne_sanity_check_user_mem_region_page(ne_enclave, ne_mem_region->pages[i]); + if (rc < 0) + goto put_pages; + + rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions, + page_to_phys(ne_mem_region->pages[i]), + page_size(ne_mem_region->pages[i])); + if (rc < 0) + goto put_pages; + + memory_size += page_size(ne_mem_region->pages[i]); + + ne_mem_region->nr_pages++; + } while (memory_size < mem_region.memory_size); + + if ((ne_enclave->nr_mem_regions + phys_contig_mem_regions.num) > + ne_enclave->max_mem_regions) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Reached max memory regions %lld\n", + ne_enclave->max_mem_regions); + + rc = -NE_ERR_MEM_MAX_REGIONS; + + goto put_pages; + } + + for (i = 0; i < phys_contig_mem_regions.num; i++) { + u64 phys_region_addr = phys_contig_mem_regions.regions[i].start; + u64 phys_region_size = range_len(&phys_contig_mem_regions.regions[i]); + + rc = ne_sanity_check_phys_mem_region(phys_region_addr, phys_region_size); + if (rc < 0) + goto put_pages; + } + + ne_mem_region->memory_size = mem_region.memory_size; + ne_mem_region->userspace_addr = mem_region.userspace_addr; + + list_add(&ne_mem_region->mem_region_list_entry, &ne_enclave->mem_regions_list); + + for (i = 0; i < phys_contig_mem_regions.num; i++) { + struct ne_pci_dev_cmd_reply cmd_reply = {}; + struct slot_add_mem_req slot_add_mem_req = {}; + + slot_add_mem_req.slot_uid = ne_enclave->slot_uid; + slot_add_mem_req.paddr = phys_contig_mem_regions.regions[i].start; + slot_add_mem_req.size = range_len(&phys_contig_mem_regions.regions[i]); + + rc = ne_do_request(pdev, SLOT_ADD_MEM, + &slot_add_mem_req, sizeof(slot_add_mem_req), + &cmd_reply, sizeof(cmd_reply)); + if (rc < 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in slot add mem [rc=%d]\n", rc); + + kfree(phys_contig_mem_regions.regions); + + /* + * Exit here without put pages as memory regions may + * already been added. 
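+ * The memory region entry is already on the enclave list at this point, so the pinned pages are released later, on enclave release.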
+ */ + return rc; + } + + ne_enclave->mem_size += slot_add_mem_req.size; + ne_enclave->nr_mem_regions++; + } + + kfree(phys_contig_mem_regions.regions); + + return 0; + +put_pages: + for (i = 0; i < ne_mem_region->nr_pages; i++) + put_page(ne_mem_region->pages[i]); +free_mem_region: + kfree(phys_contig_mem_regions.regions); + kfree(ne_mem_region->pages); + kfree(ne_mem_region); + + return rc; +} + +/** + * ne_start_enclave_ioctl() - Trigger enclave start after the enclave resources, + * such as memory and CPU, have been set. + * @ne_enclave : Private data associated with the current enclave. + * @enclave_start_info : Enclave info that includes enclave cid and flags. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_start_enclave_ioctl(struct ne_enclave *ne_enclave, + struct ne_enclave_start_info *enclave_start_info) +{ + struct ne_pci_dev_cmd_reply cmd_reply = {}; + unsigned int cpu = 0; + struct enclave_start_req enclave_start_req = {}; + unsigned int i = 0; + struct pci_dev *pdev = ne_devs.ne_pci_dev->pdev; + int rc = -EINVAL; + + if (!ne_enclave->nr_mem_regions) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Enclave has no mem regions\n"); + + return -NE_ERR_NO_MEM_REGIONS_ADDED; + } + + if (ne_enclave->mem_size < NE_MIN_ENCLAVE_MEM_SIZE) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Enclave memory is less than %ld\n", + NE_MIN_ENCLAVE_MEM_SIZE); + + return -NE_ERR_ENCLAVE_MEM_MIN_SIZE; + } + + if (!ne_enclave->nr_vcpus) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Enclave has no vCPUs\n"); + + return -NE_ERR_NO_VCPUS_ADDED; + } + + for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) + for_each_cpu(cpu, ne_enclave->threads_per_core[i]) + if (!cpumask_test_cpu(cpu, ne_enclave->vcpu_ids)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Full CPU cores not used\n"); + + return -NE_ERR_FULL_CORES_NOT_USED; + } + + enclave_start_req.enclave_cid = enclave_start_info->enclave_cid; + enclave_start_req.flags = enclave_start_info->flags; + enclave_start_req.slot_uid = ne_enclave->slot_uid; + + rc = ne_do_request(pdev, ENCLAVE_START, + &enclave_start_req, sizeof(enclave_start_req), + &cmd_reply, sizeof(cmd_reply)); + if (rc < 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in enclave start [rc=%d]\n", rc); + + return rc; + } + + ne_enclave->state = NE_STATE_RUNNING; + + enclave_start_info->enclave_cid = cmd_reply.enclave_cid; + + return 0; +} + +/** + * ne_enclave_ioctl() - Ioctl function provided by the enclave file. + * @file: File associated with this ioctl function. + * @cmd: The command that is set for the ioctl call. + * @arg: The argument that is provided for the ioctl call. + * + * Context: Process context. + * Return: + * * 0 on success. + * * Negative return value on failure. 
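+ * + * Typical user space order: NE_SET_USER_MEMORY_REGION and NE_ADD_VCPU while the enclave is in the init state, NE_GET_IMAGE_LOAD_INFO to find the offset for the EIF image, then NE_START_ENCLAVE.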
+ */ +static long ne_enclave_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + struct ne_enclave *ne_enclave = file->private_data; + + switch (cmd) { + case NE_ADD_VCPU: { + int rc = -EINVAL; + u32 vcpu_id = 0; + + if (copy_from_user(&vcpu_id, (void __user *)arg, sizeof(vcpu_id))) + return -EFAULT; + + mutex_lock(&ne_enclave->enclave_info_mutex); + + if (ne_enclave->state != NE_STATE_INIT) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Enclave is not in init state\n"); + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return -NE_ERR_NOT_IN_INIT_STATE; + } + + if (vcpu_id >= (ne_enclave->nr_parent_vm_cores * + ne_enclave->nr_threads_per_core)) { + dev_err_ratelimited(ne_misc_dev.this_device, + "vCPU id higher than max CPU id\n"); + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return -NE_ERR_INVALID_VCPU; + } + + if (!vcpu_id) { + /* Use the CPU pool for choosing a CPU for the enclave. */ + rc = ne_get_cpu_from_cpu_pool(ne_enclave, &vcpu_id); + if (rc < 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in get CPU from pool [rc=%d]\n", + rc); + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return rc; + } + } else { + /* Check if the provided vCPU is available in the NE CPU pool. */ + rc = ne_check_cpu_in_cpu_pool(ne_enclave, vcpu_id); + if (rc < 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in check CPU %d in pool [rc=%d]\n", + vcpu_id, rc); + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return rc; + } + } + + rc = ne_add_vcpu_ioctl(ne_enclave, vcpu_id); + if (rc < 0) { + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return rc; + } + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + if (copy_to_user((void __user *)arg, &vcpu_id, sizeof(vcpu_id))) + return -EFAULT; + + return 0; + } + + case NE_GET_IMAGE_LOAD_INFO: { + struct ne_image_load_info image_load_info = {}; + + if (copy_from_user(&image_load_info, (void __user *)arg, sizeof(image_load_info))) + return -EFAULT; + + mutex_lock(&ne_enclave->enclave_info_mutex); + + if (ne_enclave->state != NE_STATE_INIT) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Enclave is not in init state\n"); + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return -NE_ERR_NOT_IN_INIT_STATE; + } + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + if (!image_load_info.flags || + image_load_info.flags >= NE_IMAGE_LOAD_MAX_FLAG_VAL) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Incorrect flag in enclave image load info\n"); + + return -NE_ERR_INVALID_FLAG_VALUE; + } + + if (image_load_info.flags == NE_EIF_IMAGE) + image_load_info.memory_offset = NE_EIF_LOAD_OFFSET; + + if (copy_to_user((void __user *)arg, &image_load_info, sizeof(image_load_info))) + return -EFAULT; + + return 0; + } + + case NE_SET_USER_MEMORY_REGION: { + struct ne_user_memory_region mem_region = {}; + int rc = -EINVAL; + + if (copy_from_user(&mem_region, (void __user *)arg, sizeof(mem_region))) + return -EFAULT; + + if (mem_region.flags >= NE_MEMORY_REGION_MAX_FLAG_VAL) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Incorrect flag for user memory region\n"); + + return -NE_ERR_INVALID_FLAG_VALUE; + } + + mutex_lock(&ne_enclave->enclave_info_mutex); + + if (ne_enclave->state != NE_STATE_INIT) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Enclave is not in init state\n"); + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return -NE_ERR_NOT_IN_INIT_STATE; + } + + rc = ne_set_user_memory_region_ioctl(ne_enclave, mem_region); + if (rc < 0) { + 
mutex_unlock(&ne_enclave->enclave_info_mutex); + + return rc; + } + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return 0; + } + + case NE_START_ENCLAVE: { + struct ne_enclave_start_info enclave_start_info = {}; + int rc = -EINVAL; + + if (copy_from_user(&enclave_start_info, (void __user *)arg, + sizeof(enclave_start_info))) + return -EFAULT; + + if (enclave_start_info.flags >= NE_ENCLAVE_START_MAX_FLAG_VAL) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Incorrect flag in enclave start info\n"); + + return -NE_ERR_INVALID_FLAG_VALUE; + } + + /* + * Do not use well-known CIDs - 0, 1, 2 - for enclaves. + * VMADDR_CID_ANY = -1U + * VMADDR_CID_HYPERVISOR = 0 + * VMADDR_CID_LOCAL = 1 + * VMADDR_CID_HOST = 2 + * Note: 0 is used as a placeholder to auto-generate an enclave CID. + * http://man7.org/linux/man-pages/man7/vsock.7.html + */ + if (enclave_start_info.enclave_cid > 0 && + enclave_start_info.enclave_cid <= VMADDR_CID_HOST) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Well-known CID value, not to be used for enclaves\n"); + + return -NE_ERR_INVALID_ENCLAVE_CID; + } + + if (enclave_start_info.enclave_cid == U32_MAX) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Well-known CID value, not to be used for enclaves\n"); + + return -NE_ERR_INVALID_ENCLAVE_CID; + } + + /* + * Do not use the CID of the primary / parent VM for enclaves. + */ + if (enclave_start_info.enclave_cid == NE_PARENT_VM_CID) { + dev_err_ratelimited(ne_misc_dev.this_device, + "CID of the parent VM, not to be used for enclaves\n"); + + return -NE_ERR_INVALID_ENCLAVE_CID; + } + + /* 64-bit CIDs are not yet supported for the vsock device. */ + if (enclave_start_info.enclave_cid > U32_MAX) { + dev_err_ratelimited(ne_misc_dev.this_device, + "64-bit CIDs not yet supported for the vsock device\n"); + + return -NE_ERR_INVALID_ENCLAVE_CID; + } + + mutex_lock(&ne_enclave->enclave_info_mutex); + + if (ne_enclave->state != NE_STATE_INIT) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Enclave is not in init state\n"); + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return -NE_ERR_NOT_IN_INIT_STATE; + } + + rc = ne_start_enclave_ioctl(ne_enclave, &enclave_start_info); + if (rc < 0) { + mutex_unlock(&ne_enclave->enclave_info_mutex); + + return rc; + } + + mutex_unlock(&ne_enclave->enclave_info_mutex); + + if (copy_to_user((void __user *)arg, &enclave_start_info, + sizeof(enclave_start_info))) + return -EFAULT; + + return 0; + } + + default: + return -ENOTTY; + } + + return 0; +} + +/** + * ne_enclave_remove_all_mem_region_entries() - Remove all memory region entries + * from the enclave data structure. + * @ne_enclave : Private data associated with the current enclave. + * + * Context: Process context. This function is called with the ne_enclave mutex held. + */ +static void ne_enclave_remove_all_mem_region_entries(struct ne_enclave *ne_enclave) +{ + unsigned long i = 0; + struct ne_mem_region *ne_mem_region = NULL; + struct ne_mem_region *ne_mem_region_tmp = NULL; + + list_for_each_entry_safe(ne_mem_region, ne_mem_region_tmp, + &ne_enclave->mem_regions_list, + mem_region_list_entry) { + list_del(&ne_mem_region->mem_region_list_entry); + + for (i = 0; i < ne_mem_region->nr_pages; i++) + put_page(ne_mem_region->pages[i]); + + kfree(ne_mem_region->pages); + + kfree(ne_mem_region); + } +} + +/** + * ne_enclave_remove_all_vcpu_id_entries() - Remove all vCPU id entries from + * the enclave data structure. + * @ne_enclave : Private data associated with the current enclave. + * + * Context: Process context. 
This function is called with the ne_enclave mutex held. + */ +static void ne_enclave_remove_all_vcpu_id_entries(struct ne_enclave *ne_enclave) +{ + unsigned int cpu = 0; + unsigned int i = 0; + + mutex_lock(&ne_cpu_pool.mutex); + + for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) { + for_each_cpu(cpu, ne_enclave->threads_per_core[i]) + /* Update the available NE CPU pool. */ + cpumask_set_cpu(cpu, ne_cpu_pool.avail_threads_per_core[i]); + + free_cpumask_var(ne_enclave->threads_per_core[i]); + } + + mutex_unlock(&ne_cpu_pool.mutex); + + kfree(ne_enclave->threads_per_core); + + free_cpumask_var(ne_enclave->vcpu_ids); +} + +/** + * ne_pci_dev_remove_enclave_entry() - Remove the enclave entry from the data + * structure that is part of the NE PCI + * device private data. + * @ne_enclave : Private data associated with the current enclave. + * @ne_pci_dev : Private data associated with the PCI device. + * + * Context: Process context. This function is called with the ne_pci_dev enclave + * mutex held. + */ +static void ne_pci_dev_remove_enclave_entry(struct ne_enclave *ne_enclave, + struct ne_pci_dev *ne_pci_dev) +{ + struct ne_enclave *ne_enclave_entry = NULL; + struct ne_enclave *ne_enclave_entry_tmp = NULL; + + list_for_each_entry_safe(ne_enclave_entry, ne_enclave_entry_tmp, + &ne_pci_dev->enclaves_list, enclave_list_entry) { + if (ne_enclave_entry->slot_uid == ne_enclave->slot_uid) { + list_del(&ne_enclave_entry->enclave_list_entry); + + break; + } + } +} + +/** + * ne_enclave_release() - Release function provided by the enclave file. + * @inode: Inode associated with this file release function. + * @file: File associated with this release function. + * + * Context: Process context. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_enclave_release(struct inode *inode, struct file *file) +{ + struct ne_pci_dev_cmd_reply cmd_reply = {}; + struct enclave_stop_req enclave_stop_request = {}; + struct ne_enclave *ne_enclave = file->private_data; + struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev; + struct pci_dev *pdev = ne_pci_dev->pdev; + int rc = -EINVAL; + struct slot_free_req slot_free_req = {}; + + if (!ne_enclave) + return 0; + + /* + * Early exit in case there is an error in the enclave creation logic + * and fput() is called on the cleanup path. + */ + if (!ne_enclave->slot_uid) + return 0; + + /* + * Acquire the enclave list mutex before the enclave mutex + * in order to avoid deadlocks with @ref ne_event_work_handler. 
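+ * Keeping this lock order on both paths avoids an ABBA deadlock between enclave release and the PCI device event handling.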
+ */ + mutex_lock(&ne_pci_dev->enclaves_list_mutex); + mutex_lock(&ne_enclave->enclave_info_mutex); + + if (ne_enclave->state != NE_STATE_INIT && ne_enclave->state != NE_STATE_STOPPED) { + enclave_stop_request.slot_uid = ne_enclave->slot_uid; + + rc = ne_do_request(pdev, ENCLAVE_STOP, + &enclave_stop_request, sizeof(enclave_stop_request), + &cmd_reply, sizeof(cmd_reply)); + if (rc < 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in enclave stop [rc=%d]\n", rc); + + goto unlock_mutex; + } + + memset(&cmd_reply, 0, sizeof(cmd_reply)); + } + + slot_free_req.slot_uid = ne_enclave->slot_uid; + + rc = ne_do_request(pdev, SLOT_FREE, + &slot_free_req, sizeof(slot_free_req), + &cmd_reply, sizeof(cmd_reply)); + if (rc < 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in slot free [rc=%d]\n", rc); + + goto unlock_mutex; + } + + ne_pci_dev_remove_enclave_entry(ne_enclave, ne_pci_dev); + ne_enclave_remove_all_mem_region_entries(ne_enclave); + ne_enclave_remove_all_vcpu_id_entries(ne_enclave); + + mutex_unlock(&ne_enclave->enclave_info_mutex); + mutex_unlock(&ne_pci_dev->enclaves_list_mutex); + + kfree(ne_enclave); + + return 0; + +unlock_mutex: + mutex_unlock(&ne_enclave->enclave_info_mutex); + mutex_unlock(&ne_pci_dev->enclaves_list_mutex); + + return rc; +} + +/** + * ne_enclave_poll() - Poll functionality used for enclave out-of-band events. + * @file: File associated with this poll function. + * @wait: Poll table data structure. + * + * Context: Process context. + * Return: + * * Poll mask. + */ +static __poll_t ne_enclave_poll(struct file *file, poll_table *wait) +{ + __poll_t mask = 0; + struct ne_enclave *ne_enclave = file->private_data; + + poll_wait(file, &ne_enclave->eventq, wait); + + if (ne_enclave->has_event) + mask |= EPOLLHUP; + + return mask; +} + +static const struct file_operations ne_enclave_fops = { + .owner = THIS_MODULE, + .llseek = noop_llseek, + .poll = ne_enclave_poll, + .unlocked_ioctl = ne_enclave_ioctl, + .release = ne_enclave_release, +}; + +/** + * ne_create_vm_ioctl() - Alloc slot to be associated with an enclave. Create + * enclave file descriptor to be further used for enclave + * resources handling e.g. memory regions and CPUs. + * @ne_pci_dev : Private data associated with the PCI device. + * @slot_uid: User pointer to store the generated unique slot id + * associated with an enclave to. + * + * Context: Process context. This function is called with the ne_pci_dev enclave + * mutex held. + * Return: + * * Enclave fd on success. + * * Negative return value on failure. 
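+ * + * Minimal user space sketch (illustrative; error handling omitted):
+ *   int ne_fd = open("/dev/nitro_enclaves", O_RDWR);
+ *   __u64 slot_uid = 0;
+ *   int enclave_fd = ioctl(ne_fd, NE_CREATE_VM, &slot_uid);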
+ */ +static int ne_create_vm_ioctl(struct ne_pci_dev *ne_pci_dev, u64 __user *slot_uid) +{ + struct ne_pci_dev_cmd_reply cmd_reply = {}; + int enclave_fd = -1; + struct file *enclave_file = NULL; + unsigned int i = 0; + struct ne_enclave *ne_enclave = NULL; + struct pci_dev *pdev = ne_pci_dev->pdev; + int rc = -EINVAL; + struct slot_alloc_req slot_alloc_req = {}; + + mutex_lock(&ne_cpu_pool.mutex); + + for (i = 0; i < ne_cpu_pool.nr_parent_vm_cores; i++) + if (!cpumask_empty(ne_cpu_pool.avail_threads_per_core[i])) + break; + + if (i == ne_cpu_pool.nr_parent_vm_cores) { + dev_err_ratelimited(ne_misc_dev.this_device, + "No CPUs available in CPU pool\n"); + + mutex_unlock(&ne_cpu_pool.mutex); + + return -NE_ERR_NO_CPUS_AVAIL_IN_POOL; + } + + mutex_unlock(&ne_cpu_pool.mutex); + + ne_enclave = kzalloc(sizeof(*ne_enclave), GFP_KERNEL); + if (!ne_enclave) + return -ENOMEM; + + mutex_lock(&ne_cpu_pool.mutex); + + ne_enclave->nr_parent_vm_cores = ne_cpu_pool.nr_parent_vm_cores; + ne_enclave->nr_threads_per_core = ne_cpu_pool.nr_threads_per_core; + ne_enclave->numa_node = ne_cpu_pool.numa_node; + + mutex_unlock(&ne_cpu_pool.mutex); + + ne_enclave->threads_per_core = kcalloc(ne_enclave->nr_parent_vm_cores, + sizeof(*ne_enclave->threads_per_core), + GFP_KERNEL); + if (!ne_enclave->threads_per_core) { + rc = -ENOMEM; + + goto free_ne_enclave; + } + + for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) + if (!zalloc_cpumask_var(&ne_enclave->threads_per_core[i], GFP_KERNEL)) { + rc = -ENOMEM; + + goto free_cpumask; + } + + if (!zalloc_cpumask_var(&ne_enclave->vcpu_ids, GFP_KERNEL)) { + rc = -ENOMEM; + + goto free_cpumask; + } + + enclave_fd = get_unused_fd_flags(O_CLOEXEC); + if (enclave_fd < 0) { + rc = enclave_fd; + + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in getting unused fd [rc=%d]\n", rc); + + goto free_cpumask; + } + + enclave_file = anon_inode_getfile("ne-vm", &ne_enclave_fops, ne_enclave, O_RDWR); + if (IS_ERR(enclave_file)) { + rc = PTR_ERR(enclave_file); + + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in anon inode get file [rc=%d]\n", rc); + + goto put_fd; + } + + rc = ne_do_request(pdev, SLOT_ALLOC, + &slot_alloc_req, sizeof(slot_alloc_req), + &cmd_reply, sizeof(cmd_reply)); + if (rc < 0) { + dev_err_ratelimited(ne_misc_dev.this_device, + "Error in slot alloc [rc=%d]\n", rc); + + goto put_file; + } + + init_waitqueue_head(&ne_enclave->eventq); + ne_enclave->has_event = false; + mutex_init(&ne_enclave->enclave_info_mutex); + ne_enclave->max_mem_regions = cmd_reply.mem_regions; + INIT_LIST_HEAD(&ne_enclave->mem_regions_list); + ne_enclave->mm = current->mm; + ne_enclave->slot_uid = cmd_reply.slot_uid; + ne_enclave->state = NE_STATE_INIT; + + list_add(&ne_enclave->enclave_list_entry, &ne_pci_dev->enclaves_list); + + if (copy_to_user(slot_uid, &ne_enclave->slot_uid, sizeof(ne_enclave->slot_uid))) { + /* + * As we're holding the only reference to 'enclave_file', fput() + * will call ne_enclave_release() which will do a proper cleanup + * of all so far allocated resources, leaving only the unused fd + * for us to free. 
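+ * fd_install() has not been called yet at this point, so user space never saw the new file descriptor.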
+ */ + fput(enclave_file); + put_unused_fd(enclave_fd); + + return -EFAULT; + } + + fd_install(enclave_fd, enclave_file); + + return enclave_fd; + +put_file: + fput(enclave_file); +put_fd: + put_unused_fd(enclave_fd); +free_cpumask: + free_cpumask_var(ne_enclave->vcpu_ids); + for (i = 0; i < ne_enclave->nr_parent_vm_cores; i++) + free_cpumask_var(ne_enclave->threads_per_core[i]); + kfree(ne_enclave->threads_per_core); +free_ne_enclave: + kfree(ne_enclave); + + return rc; +} + +/** + * ne_ioctl() - Ioctl function provided by the NE misc device. + * @file: File associated with this ioctl function. + * @cmd: The command that is set for the ioctl call. + * @arg: The argument that is provided for the ioctl call. + * + * Context: Process context. + * Return: + * * Ioctl result (e.g. enclave file descriptor) on success. + * * Negative return value on failure. + */ +static long ne_ioctl(struct file *file, unsigned int cmd, unsigned long arg) +{ + switch (cmd) { + case NE_CREATE_VM: { + int enclave_fd = -1; + struct ne_pci_dev *ne_pci_dev = ne_devs.ne_pci_dev; + u64 __user *slot_uid = (void __user *)arg; + + mutex_lock(&ne_pci_dev->enclaves_list_mutex); + enclave_fd = ne_create_vm_ioctl(ne_pci_dev, slot_uid); + mutex_unlock(&ne_pci_dev->enclaves_list_mutex); + + return enclave_fd; + } + + default: + return -ENOTTY; + } + + return 0; +} + +#if defined(CONFIG_NITRO_ENCLAVES_MISC_DEV_TEST) +#include "ne_misc_dev_test.c" +#endif + +static int __init ne_init(void) +{ + mutex_init(&ne_cpu_pool.mutex); + + return pci_register_driver(&ne_pci_driver); +} + +static void __exit ne_exit(void) +{ + pci_unregister_driver(&ne_pci_driver); + + ne_teardown_cpu_pool(); +} + +module_init(ne_init); +module_exit(ne_exit); + +MODULE_AUTHOR("Amazon.com, Inc. or its affiliates"); +MODULE_DESCRIPTION("Nitro Enclaves Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev.h b/drivers/virt/nitro_enclaves/ne_misc_dev.h new file mode 100644 index 000000000..2a4d2224b --- /dev/null +++ b/drivers/virt/nitro_enclaves/ne_misc_dev.h @@ -0,0 +1,109 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. + */ + +#ifndef _NE_MISC_DEV_H_ +#define _NE_MISC_DEV_H_ + +#include <linux/cpumask.h> +#include <linux/list.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/wait.h> + +#include "ne_pci_dev.h" + +/** + * struct ne_mem_region - Entry in the enclave user space memory regions list. + * @mem_region_list_entry: Entry in the list of enclave memory regions. + * @memory_size: Size of the user space memory region. + * @nr_pages: Number of pages that make up the memory region. + * @pages: Pages that make up the user space memory region. + * @userspace_addr: User space address of the memory region. + */ +struct ne_mem_region { + struct list_head mem_region_list_entry; + u64 memory_size; + unsigned long nr_pages; + struct page **pages; + u64 userspace_addr; +}; + +/** + * struct ne_enclave - Per-enclave data used for enclave lifetime management. + * @enclave_info_mutex : Mutex for accessing this internal state. + * @enclave_list_entry : Entry in the list of created enclaves. + * @eventq: Wait queue used for out-of-band event notifications + * triggered from the PCI device event handler to + * the enclave process via the poll function. + * @has_event: Variable used to determine if the out-of-band event + * was triggered. 
+ * @max_mem_regions: The maximum number of memory regions that can be + * handled by the hypervisor. + * @mem_regions_list: Enclave user space memory regions list. + * @mem_size: Enclave memory size. + * @mm : Enclave process abstraction mm data struct. + * @nr_mem_regions: Number of memory regions associated with the enclave. + * @nr_parent_vm_cores : The size of the threads per core array. The + * total number of CPU cores available on the + * parent / primary VM. + * @nr_threads_per_core: The number of threads that a full CPU core has. + * @nr_vcpus: Number of vcpus associated with the enclave. + * @numa_node: NUMA node of the enclave memory and CPUs. + * @slot_uid: Slot unique id mapped to the enclave. + * @state: Enclave state, updated during enclave lifetime. + * @threads_per_core: Enclave full CPU cores array, indexed by core id, + * consisting of cpumasks with all their threads. + * Full CPU cores are taken from the NE CPU pool + * and are available to the enclave. + * @vcpu_ids: Cpumask of the vCPUs that are set for the enclave. + */ +struct ne_enclave { + struct mutex enclave_info_mutex; + struct list_head enclave_list_entry; + wait_queue_head_t eventq; + bool has_event; + u64 max_mem_regions; + struct list_head mem_regions_list; + u64 mem_size; + struct mm_struct *mm; + unsigned int nr_mem_regions; + unsigned int nr_parent_vm_cores; + unsigned int nr_threads_per_core; + unsigned int nr_vcpus; + int numa_node; + u64 slot_uid; + u16 state; + cpumask_var_t *threads_per_core; + cpumask_var_t vcpu_ids; +}; + +/** + * enum ne_state - States available for an enclave. + * @NE_STATE_INIT: The enclave has not been started yet. + * @NE_STATE_RUNNING: The enclave was started and is running as expected. + * @NE_STATE_STOPPED: The enclave exited without userspace interaction. + */ +enum ne_state { + NE_STATE_INIT = 0, + NE_STATE_RUNNING = 2, + NE_STATE_STOPPED = U16_MAX, +}; + +/** + * struct ne_devs - Data structure to keep refs to the NE misc and PCI devices. + * @ne_misc_dev: Nitro Enclaves misc device. + * @ne_pci_dev : Nitro Enclaves PCI device. + */ +struct ne_devs { + struct miscdevice *ne_misc_dev; + struct ne_pci_dev *ne_pci_dev; +}; + +/* Nitro Enclaves (NE) data structure for keeping refs to the NE misc and PCI devices. 
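+ * The instance is defined in ne_misc_dev.c; the PCI device reference is expected to be set by the NE PCI device driver probe.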
*/ +extern struct ne_devs ne_devs; + +#endif /* _NE_MISC_DEV_H_ */ diff --git a/drivers/virt/nitro_enclaves/ne_misc_dev_test.c b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c new file mode 100644 index 000000000..74df43b92 --- /dev/null +++ b/drivers/virt/nitro_enclaves/ne_misc_dev_test.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <kunit/test.h> + +#define MAX_PHYS_REGIONS 16 +#define INVALID_VALUE (~0ull) + +struct ne_phys_regions_test { + u64 paddr; + u64 size; + int expect_rc; + unsigned long expect_num; + u64 expect_last_paddr; + u64 expect_last_size; +} phys_regions_test_cases[] = { + /* + * Add the region from 0x1000 to (0x1000 + 0x200000 - 1): + * Expected result: + * Failed, start address is not 2M-aligned + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 0 + * regions = {} + */ + {0x1000, 0x200000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE}, + + /* + * Add the region from 0x200000 to (0x200000 + 0x1000 - 1): + * Expected result: + * Failed, size is not 2M-aligned + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 0 + * regions = {} + */ + {0x200000, 0x1000, -EINVAL, 0, INVALID_VALUE, INVALID_VALUE}, + + /* + * Add the region from 0x200000 to (0x200000 + 0x200000 - 1): + * Expected result: + * Successful + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 1 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * } + */ + {0x200000, 0x200000, 0, 1, 0x200000, 0x200000}, + + /* + * Add the region from 0x0 to (0x0 + 0x200000 - 1): + * Expected result: + * Successful + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 2 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * {start=0x0, end=0x1fffff}, // len=0x200000 + * } + */ + {0x0, 0x200000, 0, 2, 0x0, 0x200000}, + + /* + * Add the region from 0x600000 to (0x600000 + 0x400000 - 1): + * Expected result: + * Successful + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 3 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * {start=0x0, end=0x1fffff}, // len=0x200000 + * {start=0x600000, end=0x9fffff}, // len=0x400000 + * } + */ + {0x600000, 0x400000, 0, 3, 0x600000, 0x400000}, + + /* + * Add the region from 0xa00000 to (0xa00000 + 0x400000 - 1): + * Expected result: + * Successful, merging case! 
+ * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 3 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * {start=0x0, end=0x1fffff}, // len=0x200000 + * {start=0x600000, end=0xdfffff}, // len=0x800000 + * } + */ + {0xa00000, 0x400000, 0, 3, 0x600000, 0x800000}, + + /* + * Add the region from 0x1000 to (0x1000 + 0x200000 - 1): + * Expected result: + * Failed, start address is not 2M-aligned + * + * Now the instance of struct ne_phys_contig_mem_regions is: + * num = 3 + * regions = { + * {start=0x200000, end=0x3fffff}, // len=0x200000 + * {start=0x0, end=0x1fffff}, // len=0x200000 + * {start=0x600000, end=0xdfffff}, // len=0x800000 + * } + */ + {0x1000, 0x200000, -EINVAL, 3, 0x600000, 0x800000}, +}; + +static void ne_misc_dev_test_merge_phys_contig_memory_regions(struct kunit *test) +{ + struct ne_phys_contig_mem_regions phys_contig_mem_regions = {}; + int rc = 0; + int i = 0; + + phys_contig_mem_regions.regions = kunit_kcalloc(test, MAX_PHYS_REGIONS, + sizeof(*phys_contig_mem_regions.regions), + GFP_KERNEL); + KUNIT_ASSERT_TRUE(test, phys_contig_mem_regions.regions); + + for (i = 0; i < ARRAY_SIZE(phys_regions_test_cases); i++) { + struct ne_phys_regions_test *test_case = &phys_regions_test_cases[i]; + unsigned long num = 0; + + rc = ne_merge_phys_contig_memory_regions(&phys_contig_mem_regions, + test_case->paddr, test_case->size); + KUNIT_EXPECT_EQ(test, rc, test_case->expect_rc); + KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.num, test_case->expect_num); + + if (test_case->expect_last_paddr == INVALID_VALUE) + continue; + + num = phys_contig_mem_regions.num; + KUNIT_EXPECT_EQ(test, phys_contig_mem_regions.regions[num - 1].start, + test_case->expect_last_paddr); + KUNIT_EXPECT_EQ(test, range_len(&phys_contig_mem_regions.regions[num - 1]), + test_case->expect_last_size); + } + + kunit_kfree(test, phys_contig_mem_regions.regions); +} + +static struct kunit_case ne_misc_dev_test_cases[] = { + KUNIT_CASE(ne_misc_dev_test_merge_phys_contig_memory_regions), + {} +}; + +static struct kunit_suite ne_misc_dev_test_suite = { + .name = "ne_misc_dev_test", + .test_cases = ne_misc_dev_test_cases, +}; + +kunit_test_suite(ne_misc_dev_test_suite); diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.c b/drivers/virt/nitro_enclaves/ne_pci_dev.c new file mode 100644 index 000000000..6b81e8f3a --- /dev/null +++ b/drivers/virt/nitro_enclaves/ne_pci_dev.c @@ -0,0 +1,626 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + */ + +/** + * DOC: Nitro Enclaves (NE) PCI device driver. + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nitro_enclaves.h> +#include <linux/pci.h> +#include <linux/types.h> +#include <linux/wait.h> + +#include "ne_misc_dev.h" +#include "ne_pci_dev.h" + +/** + * NE_DEFAULT_TIMEOUT_MSECS - Default timeout to wait for a reply from + * the NE PCI device. + */ +#define NE_DEFAULT_TIMEOUT_MSECS (120000) /* 120 sec */ + +static const struct pci_device_id ne_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_NE) }, + { 0, } +}; + +MODULE_DEVICE_TABLE(pci, ne_pci_ids); + +/** + * ne_submit_request() - Submit command request to the PCI device based on the + * command type. + * @pdev: PCI device to send the command to. + * @cmd_type: Command type of the request sent to the PCI device. + * @cmd_request: Command request payload. 
+ * @cmd_request_size: Size of the command request payload. + * + * Context: Process context. This function is called with the ne_pci_dev mutex held. + */ +static void ne_submit_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type, + void *cmd_request, size_t cmd_request_size) +{ + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + + memcpy_toio(ne_pci_dev->iomem_base + NE_SEND_DATA, cmd_request, cmd_request_size); + + iowrite32(cmd_type, ne_pci_dev->iomem_base + NE_COMMAND); +} + +/** + * ne_retrieve_reply() - Retrieve reply from the PCI device. + * @pdev: PCI device to receive the reply from. + * @cmd_reply: Command reply payload. + * @cmd_reply_size: Size of the command reply payload. + * + * Context: Process context. This function is called with the ne_pci_dev mutex held. + */ +static void ne_retrieve_reply(struct pci_dev *pdev, struct ne_pci_dev_cmd_reply *cmd_reply, + size_t cmd_reply_size) +{ + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + + memcpy_fromio(cmd_reply, ne_pci_dev->iomem_base + NE_RECV_DATA, cmd_reply_size); +} + +/** + * ne_wait_for_reply() - Wait for a reply of a PCI device command. + * @pdev: PCI device for which a reply is waited. + * + * Context: Process context. This function is called with the ne_pci_dev mutex held. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_wait_for_reply(struct pci_dev *pdev) +{ + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + int rc = -EINVAL; + + /* + * TODO: Update to _interruptible and handle interrupted wait event + * e.g. -ERESTARTSYS, incoming signals + update timeout, if needed. + */ + rc = wait_event_timeout(ne_pci_dev->cmd_reply_wait_q, + atomic_read(&ne_pci_dev->cmd_reply_avail) != 0, + msecs_to_jiffies(NE_DEFAULT_TIMEOUT_MSECS)); + if (!rc) + return -ETIMEDOUT; + + return 0; +} + +int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type, + void *cmd_request, size_t cmd_request_size, + struct ne_pci_dev_cmd_reply *cmd_reply, size_t cmd_reply_size) +{ + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + int rc = -EINVAL; + + if (cmd_type <= INVALID_CMD || cmd_type >= MAX_CMD) { + dev_err_ratelimited(&pdev->dev, "Invalid cmd type=%u\n", cmd_type); + + return -EINVAL; + } + + if (!cmd_request) { + dev_err_ratelimited(&pdev->dev, "Null cmd request for cmd type=%u\n", + cmd_type); + + return -EINVAL; + } + + if (cmd_request_size > NE_SEND_DATA_SIZE) { + dev_err_ratelimited(&pdev->dev, "Invalid req size=%zu for cmd type=%u\n", + cmd_request_size, cmd_type); + + return -EINVAL; + } + + if (!cmd_reply) { + dev_err_ratelimited(&pdev->dev, "Null cmd reply for cmd type=%u\n", + cmd_type); + + return -EINVAL; + } + + if (cmd_reply_size > NE_RECV_DATA_SIZE) { + dev_err_ratelimited(&pdev->dev, "Invalid reply size=%zu for cmd type=%u\n", + cmd_reply_size, cmd_type); + + return -EINVAL; + } + + /* + * Use this mutex so that the PCI device handles one command request at + * a time. 
+ */ + mutex_lock(&ne_pci_dev->pci_dev_mutex); + + atomic_set(&ne_pci_dev->cmd_reply_avail, 0); + + ne_submit_request(pdev, cmd_type, cmd_request, cmd_request_size); + + rc = ne_wait_for_reply(pdev); + if (rc < 0) { + dev_err_ratelimited(&pdev->dev, "Error in wait for reply for cmd type=%u [rc=%d]\n", + cmd_type, rc); + + goto unlock_mutex; + } + + ne_retrieve_reply(pdev, cmd_reply, cmd_reply_size); + + atomic_set(&ne_pci_dev->cmd_reply_avail, 0); + + if (cmd_reply->rc < 0) { + rc = cmd_reply->rc; + + dev_err_ratelimited(&pdev->dev, "Error in cmd process logic, cmd type=%u [rc=%d]\n", + cmd_type, rc); + + goto unlock_mutex; + } + + rc = 0; + +unlock_mutex: + mutex_unlock(&ne_pci_dev->pci_dev_mutex); + + return rc; +} + +/** + * ne_reply_handler() - Interrupt handler for retrieving a reply matching a + * request sent to the PCI device for enclave lifetime + * management. + * @irq: Received interrupt for a reply sent by the PCI device. + * @args: PCI device private data structure. + * + * Context: Interrupt context. + * Return: + * * IRQ_HANDLED on handled interrupt. + */ +static irqreturn_t ne_reply_handler(int irq, void *args) +{ + struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args; + + atomic_set(&ne_pci_dev->cmd_reply_avail, 1); + + /* TODO: Update to _interruptible. */ + wake_up(&ne_pci_dev->cmd_reply_wait_q); + + return IRQ_HANDLED; +} + +/** + * ne_event_work_handler() - Work queue handler for notifying enclaves on a + * state change received by the event interrupt + * handler. + * @work: Item containing the NE PCI device for which an out-of-band event + * was issued. + * + * An out-of-band event is being issued by the Nitro Hypervisor when at least + * one enclave is changing state without client interaction. + * + * Context: Work queue context. + */ +static void ne_event_work_handler(struct work_struct *work) +{ + struct ne_pci_dev_cmd_reply cmd_reply = {}; + struct ne_enclave *ne_enclave = NULL; + struct ne_pci_dev *ne_pci_dev = + container_of(work, struct ne_pci_dev, notify_work); + struct pci_dev *pdev = ne_pci_dev->pdev; + int rc = -EINVAL; + struct slot_info_req slot_info_req = {}; + + mutex_lock(&ne_pci_dev->enclaves_list_mutex); + + /* + * Iterate over all enclaves registered for the Nitro Enclaves + * PCI device and determine for which enclave(s) the out-of-band event + * is corresponding to. + */ + list_for_each_entry(ne_enclave, &ne_pci_dev->enclaves_list, enclave_list_entry) { + mutex_lock(&ne_enclave->enclave_info_mutex); + + /* + * Enclaves that were never started cannot receive out-of-band + * events. + */ + if (ne_enclave->state != NE_STATE_RUNNING) + goto unlock; + + slot_info_req.slot_uid = ne_enclave->slot_uid; + + rc = ne_do_request(pdev, SLOT_INFO, + &slot_info_req, sizeof(slot_info_req), + &cmd_reply, sizeof(cmd_reply)); + if (rc < 0) + dev_err(&pdev->dev, "Error in slot info [rc=%d]\n", rc); + + /* Notify enclave process that the enclave state changed. */ + if (ne_enclave->state != cmd_reply.state) { + ne_enclave->state = cmd_reply.state; + + ne_enclave->has_event = true; + + wake_up_interruptible(&ne_enclave->eventq); + } + +unlock: + mutex_unlock(&ne_enclave->enclave_info_mutex); + } + + mutex_unlock(&ne_pci_dev->enclaves_list_mutex); +} + +/** + * ne_event_handler() - Interrupt handler for PCI device out-of-band events. + * This interrupt does not supply any data in the MMIO + * region. It notifies a change in the state of any of + * the launched enclaves. + * @irq: Received interrupt for an out-of-band event. 
+ * @args: PCI device private data structure. + * + * Context: Interrupt context. + * Return: + * * IRQ_HANDLED on handled interrupt. + */ +static irqreturn_t ne_event_handler(int irq, void *args) +{ + struct ne_pci_dev *ne_pci_dev = (struct ne_pci_dev *)args; + + queue_work(ne_pci_dev->event_wq, &ne_pci_dev->notify_work); + + return IRQ_HANDLED; +} + +/** + * ne_setup_msix() - Setup MSI-X vectors for the PCI device. + * @pdev: PCI device to setup the MSI-X for. + * + * Context: Process context. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +static int ne_setup_msix(struct pci_dev *pdev) +{ + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + int nr_vecs = 0; + int rc = -EINVAL; + + nr_vecs = pci_msix_vec_count(pdev); + if (nr_vecs < 0) { + rc = nr_vecs; + + dev_err(&pdev->dev, "Error in getting vec count [rc=%d]\n", rc); + + return rc; + } + + rc = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX); + if (rc < 0) { + dev_err(&pdev->dev, "Error in alloc MSI-X vecs [rc=%d]\n", rc); + + return rc; + } + + /* + * This IRQ gets triggered every time the PCI device responds to a + * command request. The reply is then retrieved, reading from the MMIO + * space of the PCI device. + */ + rc = request_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_reply_handler, + 0, "enclave_cmd", ne_pci_dev); + if (rc < 0) { + dev_err(&pdev->dev, "Error in request irq reply [rc=%d]\n", rc); + + goto free_irq_vectors; + } + + ne_pci_dev->event_wq = create_singlethread_workqueue("ne_pci_dev_wq"); + if (!ne_pci_dev->event_wq) { + rc = -ENOMEM; + + dev_err(&pdev->dev, "Cannot get wq for dev events [rc=%d]\n", rc); + + goto free_reply_irq_vec; + } + + INIT_WORK(&ne_pci_dev->notify_work, ne_event_work_handler); + + /* + * This IRQ gets triggered every time any enclave's state changes. Its + * handler then scans for the changes and propagates them to the user + * space. + */ + rc = request_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_event_handler, + 0, "enclave_evt", ne_pci_dev); + if (rc < 0) { + dev_err(&pdev->dev, "Error in request irq event [rc=%d]\n", rc); + + goto destroy_wq; + } + + return 0; + +destroy_wq: + destroy_workqueue(ne_pci_dev->event_wq); +free_reply_irq_vec: + free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev); +free_irq_vectors: + pci_free_irq_vectors(pdev); + + return rc; +} + +/** + * ne_teardown_msix() - Teardown MSI-X vectors for the PCI device. + * @pdev: PCI device to teardown the MSI-X for. + * + * Context: Process context. + */ +static void ne_teardown_msix(struct pci_dev *pdev) +{ + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + + free_irq(pci_irq_vector(pdev, NE_VEC_EVENT), ne_pci_dev); + + flush_work(&ne_pci_dev->notify_work); + destroy_workqueue(ne_pci_dev->event_wq); + + free_irq(pci_irq_vector(pdev, NE_VEC_REPLY), ne_pci_dev); + + pci_free_irq_vectors(pdev); +} + +/** + * ne_pci_dev_enable() - Select the PCI device version and enable it. + * @pdev: PCI device to select version for and then enable. + * + * Context: Process context. + * Return: + * * 0 on success. + * * Negative return value on failure. 
+ */ +static int ne_pci_dev_enable(struct pci_dev *pdev) +{ + u8 dev_enable_reply = 0; + u16 dev_version_reply = 0; + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + + iowrite16(NE_VERSION_MAX, ne_pci_dev->iomem_base + NE_VERSION); + + dev_version_reply = ioread16(ne_pci_dev->iomem_base + NE_VERSION); + if (dev_version_reply != NE_VERSION_MAX) { + dev_err(&pdev->dev, "Error in pci dev version cmd\n"); + + return -EIO; + } + + iowrite8(NE_ENABLE_ON, ne_pci_dev->iomem_base + NE_ENABLE); + + dev_enable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE); + if (dev_enable_reply != NE_ENABLE_ON) { + dev_err(&pdev->dev, "Error in pci dev enable cmd\n"); + + return -EIO; + } + + return 0; +} + +/** + * ne_pci_dev_disable() - Disable the PCI device. + * @pdev: PCI device to disable. + * + * Context: Process context. + */ +static void ne_pci_dev_disable(struct pci_dev *pdev) +{ + u8 dev_disable_reply = 0; + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + const unsigned int sleep_time = 10; /* 10 ms */ + unsigned int sleep_time_count = 0; + + iowrite8(NE_ENABLE_OFF, ne_pci_dev->iomem_base + NE_ENABLE); + + /* + * Check for NE_ENABLE_OFF in a loop, to handle cases when the device + * state is not immediately set to disabled and going through a + * transitory state of disabling. + */ + while (sleep_time_count < NE_DEFAULT_TIMEOUT_MSECS) { + dev_disable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE); + if (dev_disable_reply == NE_ENABLE_OFF) + return; + + msleep_interruptible(sleep_time); + sleep_time_count += sleep_time; + } + + dev_disable_reply = ioread8(ne_pci_dev->iomem_base + NE_ENABLE); + if (dev_disable_reply != NE_ENABLE_OFF) + dev_err(&pdev->dev, "Error in pci dev disable cmd\n"); +} + +/** + * ne_pci_probe() - Probe function for the NE PCI device. + * @pdev: PCI device to match with the NE PCI driver. + * @id : PCI device id table associated with the NE PCI driver. + * + * Context: Process context. + * Return: + * * 0 on success. + * * Negative return value on failure. 
+ */ +static int ne_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct ne_pci_dev *ne_pci_dev = NULL; + int rc = -EINVAL; + + ne_pci_dev = kzalloc(sizeof(*ne_pci_dev), GFP_KERNEL); + if (!ne_pci_dev) + return -ENOMEM; + + rc = pci_enable_device(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "Error in pci dev enable [rc=%d]\n", rc); + + goto free_ne_pci_dev; + } + + pci_set_master(pdev); + + rc = pci_request_regions_exclusive(pdev, "nitro_enclaves"); + if (rc < 0) { + dev_err(&pdev->dev, "Error in pci request regions [rc=%d]\n", rc); + + goto disable_pci_dev; + } + + ne_pci_dev->iomem_base = pci_iomap(pdev, PCI_BAR_NE, 0); + if (!ne_pci_dev->iomem_base) { + rc = -ENOMEM; + + dev_err(&pdev->dev, "Error in pci iomap [rc=%d]\n", rc); + + goto release_pci_regions; + } + + pci_set_drvdata(pdev, ne_pci_dev); + + rc = ne_setup_msix(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "Error in pci dev msix setup [rc=%d]\n", rc); + + goto iounmap_pci_bar; + } + + ne_pci_dev_disable(pdev); + + rc = ne_pci_dev_enable(pdev); + if (rc < 0) { + dev_err(&pdev->dev, "Error in ne_pci_dev enable [rc=%d]\n", rc); + + goto teardown_msix; + } + + atomic_set(&ne_pci_dev->cmd_reply_avail, 0); + init_waitqueue_head(&ne_pci_dev->cmd_reply_wait_q); + INIT_LIST_HEAD(&ne_pci_dev->enclaves_list); + mutex_init(&ne_pci_dev->enclaves_list_mutex); + mutex_init(&ne_pci_dev->pci_dev_mutex); + ne_pci_dev->pdev = pdev; + + ne_devs.ne_pci_dev = ne_pci_dev; + + rc = misc_register(ne_devs.ne_misc_dev); + if (rc < 0) { + dev_err(&pdev->dev, "Error in misc dev register [rc=%d]\n", rc); + + goto disable_ne_pci_dev; + } + + return 0; + +disable_ne_pci_dev: + ne_devs.ne_pci_dev = NULL; + ne_pci_dev_disable(pdev); +teardown_msix: + ne_teardown_msix(pdev); +iounmap_pci_bar: + pci_set_drvdata(pdev, NULL); + pci_iounmap(pdev, ne_pci_dev->iomem_base); +release_pci_regions: + pci_release_regions(pdev); +disable_pci_dev: + pci_disable_device(pdev); +free_ne_pci_dev: + kfree(ne_pci_dev); + + return rc; +} + +/** + * ne_pci_remove() - Remove function for the NE PCI device. + * @pdev: PCI device associated with the NE PCI driver. + * + * Context: Process context. + */ +static void ne_pci_remove(struct pci_dev *pdev) +{ + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + + misc_deregister(ne_devs.ne_misc_dev); + + ne_devs.ne_pci_dev = NULL; + + ne_pci_dev_disable(pdev); + + ne_teardown_msix(pdev); + + pci_set_drvdata(pdev, NULL); + + pci_iounmap(pdev, ne_pci_dev->iomem_base); + + pci_release_regions(pdev); + + pci_disable_device(pdev); + + kfree(ne_pci_dev); +} + +/** + * ne_pci_shutdown() - Shutdown function for the NE PCI device. + * @pdev: PCI device associated with the NE PCI driver. + * + * Context: Process context. + */ +static void ne_pci_shutdown(struct pci_dev *pdev) +{ + struct ne_pci_dev *ne_pci_dev = pci_get_drvdata(pdev); + + if (!ne_pci_dev) + return; + + misc_deregister(ne_devs.ne_misc_dev); + + ne_devs.ne_pci_dev = NULL; + + ne_pci_dev_disable(pdev); + + ne_teardown_msix(pdev); + + pci_set_drvdata(pdev, NULL); + + pci_iounmap(pdev, ne_pci_dev->iomem_base); + + pci_release_regions(pdev); + + pci_disable_device(pdev); + + kfree(ne_pci_dev); +} + +/* + * TODO: Add suspend / resume functions for power management w/ CONFIG_PM, if + * needed. + */ +/* NE PCI device driver. 
*/ +struct pci_driver ne_pci_driver = { + .name = "nitro_enclaves", + .id_table = ne_pci_ids, + .probe = ne_pci_probe, + .remove = ne_pci_remove, + .shutdown = ne_pci_shutdown, +}; diff --git a/drivers/virt/nitro_enclaves/ne_pci_dev.h b/drivers/virt/nitro_enclaves/ne_pci_dev.h new file mode 100644 index 000000000..6e9f28971 --- /dev/null +++ b/drivers/virt/nitro_enclaves/ne_pci_dev.h @@ -0,0 +1,331 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + */ + +#ifndef _NE_PCI_DEV_H_ +#define _NE_PCI_DEV_H_ + +#include <linux/atomic.h> +#include <linux/list.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/wait.h> + +/** + * DOC: Nitro Enclaves (NE) PCI device + */ + +/** + * PCI_DEVICE_ID_NE - Nitro Enclaves PCI device id. + */ +#define PCI_DEVICE_ID_NE (0xe4c1) +/** + * PCI_BAR_NE - Nitro Enclaves PCI device MMIO BAR. + */ +#define PCI_BAR_NE (0x03) + +/** + * DOC: Device registers in the NE PCI device MMIO BAR + */ + +/** + * NE_ENABLE - (1 byte) Register to notify the device that the driver is using + * it (Read/Write). + */ +#define NE_ENABLE (0x0000) +#define NE_ENABLE_OFF (0x00) +#define NE_ENABLE_ON (0x01) + +/** + * NE_VERSION - (2 bytes) Register to select the device run-time version + * (Read/Write). + */ +#define NE_VERSION (0x0002) +#define NE_VERSION_MAX (0x0001) + +/** + * NE_COMMAND - (4 bytes) Register to notify the device what command was + * requested (Write-Only). + */ +#define NE_COMMAND (0x0004) + +/** + * NE_EVTCNT - (4 bytes) Register to notify the driver that a reply or a device + * event is available (Read-Only): + * - Lower half - command reply counter + * - Higher half - out-of-band device event counter + */ +#define NE_EVTCNT (0x000c) +#define NE_EVTCNT_REPLY_SHIFT (0) +#define NE_EVTCNT_REPLY_MASK (0x0000ffff) +#define NE_EVTCNT_REPLY(cnt) (((cnt) & NE_EVTCNT_REPLY_MASK) >> \ + NE_EVTCNT_REPLY_SHIFT) +#define NE_EVTCNT_EVENT_SHIFT (16) +#define NE_EVTCNT_EVENT_MASK (0xffff0000) +#define NE_EVTCNT_EVENT(cnt) (((cnt) & NE_EVTCNT_EVENT_MASK) >> \ + NE_EVTCNT_EVENT_SHIFT) + +/** + * NE_SEND_DATA - (240 bytes) Buffer for sending the command request payload + * (Read/Write). + */ +#define NE_SEND_DATA (0x0010) + +/** + * NE_RECV_DATA - (240 bytes) Buffer for receiving the command reply payload + * (Read-Only). + */ +#define NE_RECV_DATA (0x0100) + +/** + * DOC: Device MMIO buffer sizes + */ + +/** + * NE_SEND_DATA_SIZE - Size of the send buffer, in bytes. + */ +#define NE_SEND_DATA_SIZE (240) + +/** + * NE_RECV_DATA_SIZE - Size of the receive buffer, in bytes. + */ +#define NE_RECV_DATA_SIZE (240) + +/** + * DOC: MSI-X interrupt vectors + */ + +/** + * NE_VEC_REPLY - MSI-X vector used for command reply notification. + */ +#define NE_VEC_REPLY (0) + +/** + * NE_VEC_EVENT - MSI-X vector used for out-of-band events e.g. enclave crash. + */ +#define NE_VEC_EVENT (1) + +/** + * enum ne_pci_dev_cmd_type - Device command types. + * @INVALID_CMD: Invalid command. + * @ENCLAVE_START: Start an enclave, after setting its resources. + * @ENCLAVE_GET_SLOT: Get the slot uid of an enclave. + * @ENCLAVE_STOP: Terminate an enclave. + * @SLOT_ALLOC : Allocate a slot for an enclave. + * @SLOT_FREE: Free the slot allocated for an enclave + * @SLOT_ADD_MEM: Add a memory region to an enclave slot. + * @SLOT_ADD_VCPU: Add a vCPU to an enclave slot. + * @SLOT_COUNT : Get the number of allocated slots. + * @NEXT_SLOT: Get the next slot in the list of allocated slots. 
+ * @SLOT_INFO: Get the info for a slot e.g. slot uid, vCPUs count. + * @SLOT_ADD_BULK_VCPUS: Add a number of vCPUs, not providing CPU ids. + * @MAX_CMD: A gatekeeper for max possible command type. + */ +enum ne_pci_dev_cmd_type { + INVALID_CMD = 0, + ENCLAVE_START = 1, + ENCLAVE_GET_SLOT = 2, + ENCLAVE_STOP = 3, + SLOT_ALLOC = 4, + SLOT_FREE = 5, + SLOT_ADD_MEM = 6, + SLOT_ADD_VCPU = 7, + SLOT_COUNT = 8, + NEXT_SLOT = 9, + SLOT_INFO = 10, + SLOT_ADD_BULK_VCPUS = 11, + MAX_CMD, +}; + +/** + * DOC: Device commands - payload structure for requests and replies. + */ + +/** + * struct enclave_start_req - ENCLAVE_START request. + * @slot_uid: Slot unique id mapped to the enclave to start. + * @enclave_cid: Context ID (CID) for the enclave vsock device. + * If 0, CID is autogenerated. + * @flags: Flags for the enclave to start with (e.g. debug mode). + */ +struct enclave_start_req { + u64 slot_uid; + u64 enclave_cid; + u64 flags; +}; + +/** + * struct enclave_get_slot_req - ENCLAVE_GET_SLOT request. + * @enclave_cid: Context ID (CID) for the enclave vsock device. + */ +struct enclave_get_slot_req { + u64 enclave_cid; +}; + +/** + * struct enclave_stop_req - ENCLAVE_STOP request. + * @slot_uid: Slot unique id mapped to the enclave to stop. + */ +struct enclave_stop_req { + u64 slot_uid; +}; + +/** + * struct slot_alloc_req - SLOT_ALLOC request. + * @unused: In order to avoid weird sizeof edge cases. + */ +struct slot_alloc_req { + u8 unused; +}; + +/** + * struct slot_free_req - SLOT_FREE request. + * @slot_uid: Slot unique id mapped to the slot to free. + */ +struct slot_free_req { + u64 slot_uid; +}; + +/* TODO: Add flags field to the request to add memory region. */ +/** + * struct slot_add_mem_req - SLOT_ADD_MEM request. + * @slot_uid: Slot unique id mapped to the slot to add the memory region to. + * @paddr: Physical address of the memory region to add to the slot. + * @size: Memory size, in bytes, of the memory region to add to the slot. + */ +struct slot_add_mem_req { + u64 slot_uid; + u64 paddr; + u64 size; +}; + +/** + * struct slot_add_vcpu_req - SLOT_ADD_VCPU request. + * @slot_uid: Slot unique id mapped to the slot to add the vCPU to. + * @vcpu_id: vCPU ID of the CPU to add to the enclave. + * @padding: Padding for the overall data structure. + */ +struct slot_add_vcpu_req { + u64 slot_uid; + u32 vcpu_id; + u8 padding[4]; +}; + +/** + * struct slot_count_req - SLOT_COUNT request. + * @unused: In order to avoid weird sizeof edge cases. + */ +struct slot_count_req { + u8 unused; +}; + +/** + * struct next_slot_req - NEXT_SLOT request. + * @slot_uid: Slot unique id of the next slot in the iteration. + */ +struct next_slot_req { + u64 slot_uid; +}; + +/** + * struct slot_info_req - SLOT_INFO request. + * @slot_uid: Slot unique id mapped to the slot to get information about. + */ +struct slot_info_req { + u64 slot_uid; +}; + +/** + * struct slot_add_bulk_vcpus_req - SLOT_ADD_BULK_VCPUS request. + * @slot_uid: Slot unique id mapped to the slot to add vCPUs to. + * @nr_vcpus: Number of vCPUs to add to the slot. + */ +struct slot_add_bulk_vcpus_req { + u64 slot_uid; + u64 nr_vcpus; +}; + +/** + * struct ne_pci_dev_cmd_reply - NE PCI device command reply. + * @rc : Return code of the logic that processed the request. + * @padding0: Padding for the overall data structure. + * @slot_uid: Valid for all commands except SLOT_COUNT. + * @enclave_cid: Valid for ENCLAVE_START command. + * @slot_count : Valid for SLOT_COUNT command. + * @mem_regions: Valid for SLOT_ALLOC and SLOT_INFO commands. 
+ * @mem_size: Valid for SLOT_INFO command. + * @nr_vcpus: Valid for SLOT_INFO command. + * @flags: Valid for SLOT_INFO command. + * @state: Valid for SLOT_INFO command. + * @padding1: Padding for the overall data structure. + */ +struct ne_pci_dev_cmd_reply { + s32 rc; + u8 padding0[4]; + u64 slot_uid; + u64 enclave_cid; + u64 slot_count; + u64 mem_regions; + u64 mem_size; + u64 nr_vcpus; + u64 flags; + u16 state; + u8 padding1[6]; +}; + +/** + * struct ne_pci_dev - Nitro Enclaves (NE) PCI device. + * @cmd_reply_avail: Variable set if a reply has been sent by the + * PCI device. + * @cmd_reply_wait_q: Wait queue for handling command reply from the + * PCI device. + * @enclaves_list: List of the enclaves managed by the PCI device. + * @enclaves_list_mutex: Mutex for accessing the list of enclaves. + * @event_wq: Work queue for handling out-of-band events + * triggered by the Nitro Hypervisor which require + * enclave state scanning and propagation to the + * enclave process. + * @iomem_base : MMIO region of the PCI device. + * @notify_work: Work item for every received out-of-band event. + * @pci_dev_mutex: Mutex for accessing the PCI device MMIO space. + * @pdev: PCI device data structure. + */ +struct ne_pci_dev { + atomic_t cmd_reply_avail; + wait_queue_head_t cmd_reply_wait_q; + struct list_head enclaves_list; + struct mutex enclaves_list_mutex; + struct workqueue_struct *event_wq; + void __iomem *iomem_base; + struct work_struct notify_work; + struct mutex pci_dev_mutex; + struct pci_dev *pdev; +}; + +/** + * ne_do_request() - Submit command request to the PCI device based on the command + * type and retrieve the associated reply. + * @pdev: PCI device to send the command to and receive the reply from. + * @cmd_type: Command type of the request sent to the PCI device. + * @cmd_request: Command request payload. + * @cmd_request_size: Size of the command request payload. + * @cmd_reply: Command reply payload. + * @cmd_reply_size: Size of the command reply payload. + * + * Context: Process context. This function uses the ne_pci_dev mutex to handle + * one command at a time. + * Return: + * * 0 on success. + * * Negative return value on failure. + */ +int ne_do_request(struct pci_dev *pdev, enum ne_pci_dev_cmd_type cmd_type, + void *cmd_request, size_t cmd_request_size, + struct ne_pci_dev_cmd_reply *cmd_reply, + size_t cmd_reply_size); + +/* Nitro Enclaves (NE) PCI device driver */ +extern struct pci_driver ne_pci_driver; + +#endif /* _NE_PCI_DEV_H_ */ |
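The KUnit table in ne_misc_dev_test.c above encodes the behaviour expected of ne_merge_phys_contig_memory_regions(): a candidate region must be 2 MiB aligned in both start address and size, and it is either appended to the regions array or folded into the most recently added entry when it begins immediately after that entry ends. Neither the function body nor struct ne_phys_contig_mem_regions is part of this diff (both live in portions of ne_misc_dev.c / ne_misc_dev.h not shown here), so the sketch below is a hypothetical userspace re-implementation of that rule, written only to make the test vectors easier to follow; the names, the 16-entry bound and the inclusive-range layout are assumptions.

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define REGION_ALIGN (2UL * 1024 * 1024)	/* mirrors NE_MIN_MEM_REGION_SIZE */

	struct region { uint64_t start, end; };		/* inclusive range, like struct range */
	struct regions { unsigned long num; struct region r[16]; };

	/* Hypothetical re-implementation of the merge rule exercised by the test table. */
	static int merge_region(struct regions *rs, uint64_t paddr, uint64_t size)
	{
		if (paddr % REGION_ALIGN || size % REGION_ALIGN)
			return -1;	/* the driver reports -EINVAL for unaligned regions */

		/* Physically contiguous with the last added region: extend it. */
		if (rs->num && rs->r[rs->num - 1].end + 1 == paddr) {
			rs->r[rs->num - 1].end = paddr + size - 1;
			return 0;
		}

		rs->r[rs->num].start = paddr;
		rs->r[rs->num].end = paddr + size - 1;
		rs->num++;

		return 0;
	}

	int main(void)
	{
		struct regions rs = { 0 };

		assert(merge_region(&rs, 0x1000, 0x200000) < 0);	/* unaligned start */
		assert(merge_region(&rs, 0x200000, 0x200000) == 0);	/* first entry */
		assert(merge_region(&rs, 0x600000, 0x400000) == 0);	/* gap, so a new entry */
		assert(merge_region(&rs, 0xa00000, 0x400000) == 0);	/* merges into the previous one */
		assert(rs.num == 2 && rs.r[1].end == 0xdfffff);

		printf("merge rule matches the expectations in the test table\n");

		return 0;
	}

The ordering sensitivity visible in the table (the region at 0x0 stays a separate entry even though it is physically adjacent to an earlier one) falls out of checking nothing but the last added region for contiguity.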
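ne_do_request() is the single entry point the misc device code uses to talk to the NE PCI device: it serializes callers on pci_dev_mutex, copies the request payload into NE_SEND_DATA, writes the command type to NE_COMMAND, sleeps until the NE_VEC_REPLY interrupt fires (or the 120 s timeout expires), then copies the reply out of NE_RECV_DATA and folds a negative device-side cmd_reply.rc into its own return value. A minimal hypothetical caller, using only types declared in ne_pci_dev.h above, could look like the sketch below; ne_slot_free_example() is not part of this diff.

	#include <linux/pci.h>

	#include "ne_pci_dev.h"

	/* Hypothetical helper: free an enclave slot previously obtained via SLOT_ALLOC. */
	static int ne_slot_free_example(struct pci_dev *pdev, u64 slot_uid)
	{
		struct ne_pci_dev_cmd_reply cmd_reply = {};
		struct slot_free_req slot_free_req = {
			.slot_uid = slot_uid,
		};

		/*
		 * Process context only: this sleeps on the reply wait queue and
		 * takes the ne_pci_dev mutex internally.
		 */
		return ne_do_request(pdev, SLOT_FREE, &slot_free_req, sizeof(slot_free_req),
				     &cmd_reply, sizeof(cmd_reply));
	}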
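ne_event_work_handler() is the producer half of the out-of-band notification path: for every running enclave whose SLOT_INFO state differs from the cached one it updates the state, sets has_event and wakes eventq. The consumer half, the enclave fd's poll handler, lives in the portion of ne_misc_dev.c that is not in this excerpt, so the fragment below is only a sketch of how such a waitqueue/flag pair is conventionally consumed; the function name and the choice of EPOLLHUP are illustrative assumptions, not code from this patch.

	#include <linux/fs.h>
	#include <linux/poll.h>

	#include "ne_misc_dev.h"

	/* Sketch of a poll handler driven by the eventq / has_event pair. */
	static __poll_t ne_enclave_poll_sketch(struct file *file, poll_table *wait)
	{
		struct ne_enclave *ne_enclave = file->private_data;
		__poll_t mask = 0;

		poll_wait(file, &ne_enclave->eventq, wait);

		/*
		 * has_event is set by ne_event_work_handler() once the enclave
		 * state reported by the device no longer matches the cached one.
		 */
		if (ne_enclave->has_event)
			mask |= EPOLLHUP;

		return mask;
	}

A process that poll()s the enclave fd then observes the state change (for example an enclave crash) without having issued any request of its own.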
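NE_EVTCNT packs two 16-bit counters into a single 32-bit read-only register, so one register read tells the driver both how many command replies and how many out-of-band events the device has posted. The standalone program below exercises the two accessor macros copied from ne_pci_dev.h on a made-up value (0x00030002 is not something read from hardware) purely to make the bit layout explicit.

	#include <stdint.h>
	#include <stdio.h>

	/* Copied from ne_pci_dev.h: lower half counts replies, upper half events. */
	#define NE_EVTCNT_REPLY_SHIFT	(0)
	#define NE_EVTCNT_REPLY_MASK	(0x0000ffff)
	#define NE_EVTCNT_REPLY(cnt)	(((cnt) & NE_EVTCNT_REPLY_MASK) >> NE_EVTCNT_REPLY_SHIFT)
	#define NE_EVTCNT_EVENT_SHIFT	(16)
	#define NE_EVTCNT_EVENT_MASK	(0xffff0000)
	#define NE_EVTCNT_EVENT(cnt)	(((cnt) & NE_EVTCNT_EVENT_MASK) >> NE_EVTCNT_EVENT_SHIFT)

	int main(void)
	{
		uint32_t evtcnt = 0x00030002;	/* example value: 3 events, 2 replies */

		printf("replies=%u events=%u\n",
		       (unsigned int)NE_EVTCNT_REPLY(evtcnt),
		       (unsigned int)NE_EVTCNT_EVENT(evtcnt));
		/* prints: replies=2 events=3 */

		return 0;
	}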
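The misc device that ne_pci_probe() registers is what userspace opens as /dev/nitro_enclaves. The snippet below assumes the NE ioctl UAPI from <linux/nitro_enclaves.h> (NE_CREATE_VM returning a per-enclave file descriptor) and the convention that the out-of-band event path surfaces as a hang-up condition on that descriptor; none of this is established by the code in this diff, so treat the ioctl name, its argument type and the poll semantics as assumptions taken from the NE documentation and samples rather than from the patch itself.

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	#include <linux/nitro_enclaves.h>	/* assumed UAPI header, not part of this diff */

	int main(void)
	{
		__u64 slot_uid = 0;
		struct pollfd pfd = { 0 };
		int ne_dev_fd, enclave_fd;

		ne_dev_fd = open("/dev/nitro_enclaves", O_RDWR | O_CLOEXEC);
		if (ne_dev_fd < 0) {
			perror("open /dev/nitro_enclaves");
			return 1;
		}

		/* Assumed: NE_CREATE_VM allocates a slot and returns the enclave fd. */
		enclave_fd = ioctl(ne_dev_fd, NE_CREATE_VM, &slot_uid);
		if (enclave_fd < 0) {
			perror("NE_CREATE_VM");
			return 1;
		}

		/* ... add memory regions and vCPUs, then start the enclave ... */

		/*
		 * The event interrupt -> workqueue -> eventq path in the driver
		 * shows up here: poll() returns once the enclave changes state
		 * without any request from this process.
		 */
		pfd.fd = enclave_fd;
		pfd.events = 0;
		if (poll(&pfd, 1, -1) > 0)
			printf("enclave fd revents=0x%x\n", pfd.revents);

		close(enclave_fd);
		close(ne_dev_fd);

		return 0;
	}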