 drivers/virt/acrn/mm.c | 61 +++++++++++++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 49 insertions(+), 12 deletions(-)
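
In acrn_vm_ram_map(), the VM_PFNMAP path used to resolve only the first page with follow_pfn() and assumed the rest of the range. The hunks below replace that with a per-page follow_pte() walk that rejects the mapping unless every PTE is writable when ACRN_MEM_ACCESS_WRITE is requested, refers to a non-refcounted (reserved or !pfn_valid) page, and is physically contiguous with the first PFN. The nr_pages calculation moves up so both paths share it, and the two region loops become equivalent for loops.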
diff --git a/drivers/virt/acrn/mm.c b/drivers/virt/acrn/mm.c
index fa5d9ca6be..9c75de0656 100644
--- a/drivers/virt/acrn/mm.c
+++ b/drivers/virt/acrn/mm.c
@@ -155,43 +155,83 @@ int acrn_vm_memseg_unmap(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
 int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
 {
 	struct vm_memory_region_batch *regions_info;
-	int nr_pages, i = 0, order, nr_regions = 0;
+	int nr_pages, i, order, nr_regions = 0;
 	struct vm_memory_mapping *region_mapping;
 	struct vm_memory_region_op *vm_region;
 	struct page **pages = NULL, *page;
 	void *remap_vaddr;
 	int ret, pinned;
 	u64 user_vm_pa;
-	unsigned long pfn;
 	struct vm_area_struct *vma;
 
 	if (!vm || !memmap)
 		return -EINVAL;
 
+	/* Get the page number of the map region */
+	nr_pages = memmap->len >> PAGE_SHIFT;
+	if (!nr_pages)
+		return -EINVAL;
+
 	mmap_read_lock(current->mm);
 	vma = vma_lookup(current->mm, memmap->vma_base);
 	if (vma && ((vma->vm_flags & VM_PFNMAP) != 0)) {
+		unsigned long start_pfn, cur_pfn;
+		spinlock_t *ptl;
+		bool writable;
+		pte_t *ptep;
+
 		if ((memmap->vma_base + memmap->len) > vma->vm_end) {
 			mmap_read_unlock(current->mm);
 			return -EINVAL;
 		}
 
-		ret = follow_pfn(vma, memmap->vma_base, &pfn);
+		for (i = 0; i < nr_pages; i++) {
+			ret = follow_pte(vma->vm_mm,
+					 memmap->vma_base + i * PAGE_SIZE,
+					 &ptep, &ptl);
+			if (ret)
+				break;
+
+			cur_pfn = pte_pfn(ptep_get(ptep));
+			if (i == 0)
+				start_pfn = cur_pfn;
+			writable = !!pte_write(ptep_get(ptep));
+			pte_unmap_unlock(ptep, ptl);
+
+			/* Disallow write access if the PTE is not writable. */
+			if (!writable &&
+			    (memmap->attr & ACRN_MEM_ACCESS_WRITE)) {
+				ret = -EFAULT;
+				break;
+			}
+
+			/* Disallow refcounted pages. */
+			if (pfn_valid(cur_pfn) &&
+			    !PageReserved(pfn_to_page(cur_pfn))) {
+				ret = -EFAULT;
+				break;
+			}
+
+			/* Disallow non-contiguous ranges. */
+			if (cur_pfn != start_pfn + i) {
+				ret = -EINVAL;
+				break;
+			}
+		}
 		mmap_read_unlock(current->mm);
-		if (ret < 0) {
+
+		if (ret) {
 			dev_dbg(acrn_dev.this_device,
 				"Failed to lookup PFN at VMA:%pK.\n", (void *)memmap->vma_base);
 			return ret;
 		}
 		return acrn_mm_region_add(vm, memmap->user_vm_pa,
-					  PFN_PHYS(pfn), memmap->len,
+					  PFN_PHYS(start_pfn), memmap->len,
 					  ACRN_MEM_TYPE_WB, memmap->attr);
 	}
 	mmap_read_unlock(current->mm);
 
-	/* Get the page number of the map region */
-	nr_pages = memmap->len >> PAGE_SHIFT;
 	pages = vzalloc(array_size(nr_pages, sizeof(*pages)));
 	if (!pages)
 		return -ENOMEM;
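
Read in isolation, the validation loop added above follows a snapshot-then-check pattern. A sketch of the same logic as a standalone helper (kernel context; the helper name and signature are illustrative, not part of the patch, and the caller is assumed to hold mmap_read_lock()):

static int acrn_resolve_pfnmap_range(struct vm_area_struct *vma,
				     unsigned long vaddr, int nr_pages,
				     bool need_write, unsigned long *start_pfn)
{
	unsigned long cur_pfn;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int i, ret;

	for (i = 0; i < nr_pages; i++) {
		ret = follow_pte(vma->vm_mm, vaddr + i * PAGE_SIZE,
				 &ptep, &ptl);
		if (ret)
			return ret;

		/* Snapshot the PTE, then drop the page-table lock. */
		pte = ptep_get(ptep);
		cur_pfn = pte_pfn(pte);
		if (i == 0)
			*start_pfn = cur_pfn;
		pte_unmap_unlock(ptep, ptl);

		/* Disallow write access if the PTE is not writable. */
		if (need_write && !pte_write(pte))
			return -EFAULT;

		/* Disallow refcounted pages. */
		if (pfn_valid(cur_pfn) && !PageReserved(pfn_to_page(cur_pfn)))
			return -EFAULT;

		/* Disallow non-contiguous ranges. */
		if (cur_pfn != *start_pfn + i)
			return -EINVAL;
	}
	return 0;
}

The ordering matters: the PTE value is read while the page-table lock is held, and all checks run on that snapshot after pte_unmap_unlock(), exactly as in the hunk.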
@@ -235,12 +275,11 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
 	mutex_unlock(&vm->regions_mapping_lock);
 
 	/* Calculate count of vm_memory_region_op */
-	while (i < nr_pages) {
+	for (i = 0; i < nr_pages; i += 1 << order) {
 		page = pages[i];
 		VM_BUG_ON_PAGE(PageTail(page), page);
 		order = compound_order(page);
 		nr_regions++;
-		i += 1 << order;
 	}
 
 	/* Prepare the vm_memory_region_batch */
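
The loop conversion above relies on order being assigned inside the body before the i += 1 << order step runs, so the for loop is behavior-identical to the old while loop: it strides over one (possibly compound) page per iteration. The same logic, with comments added for illustration:

	/* One vm_memory_region_op per compound allocation. */
	for (i = 0; i < nr_pages; i += 1 << order) {
		page = pages[i];
		/* The stride must land on head pages only. */
		VM_BUG_ON_PAGE(PageTail(page), page);
		order = compound_order(page);	/* 0 for base pages */
		nr_regions++;
	}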
@@ -257,8 +296,7 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
 	regions_info->vmid = vm->vmid;
 	regions_info->regions_gpa = virt_to_phys(vm_region);
 	user_vm_pa = memmap->user_vm_pa;
-	i = 0;
-	while (i < nr_pages) {
+	for (i = 0; i < nr_pages; i += 1 << order) {
 		u32 region_size;
 
 		page = pages[i];
@@ -274,7 +312,6 @@ int acrn_vm_ram_map(struct acrn_vm *vm, struct acrn_vm_memmap *memmap)
 		vm_region++;
 		user_vm_pa += region_size;
-		i += 1 << order;
 	}
 
 	/* Inform the ACRN Hypervisor to set up EPT mappings */
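
To sanity-check the stride arithmetic outside the kernel, here is a small self-contained model (plain C; the page orders are hypothetical and only illustrate how 1 << order partitions a pinned range):

#include <stdio.h>

int main(void)
{
	/*
	 * Hypothetical layout: one order-9 compound page (512 base
	 * pages) followed by two order-0 base pages.
	 */
	int orders[] = { 9, 0, 0 };
	int nr_pages = 512 + 2;
	int i = 0, idx = 0, nr_regions = 0;

	while (i < nr_pages) {
		int order = orders[idx++];

		printf("region %d: pages [%d, %d)\n",
		       nr_regions, i, i + (1 << order));
		nr_regions++;
		i += 1 << order;
	}
	printf("%d pages -> %d regions\n", nr_pages, nr_regions);
	return 0;
}

This prints three regions for 514 pages, mirroring how the driver emits one vm_memory_region_op per compound page rather than per base page.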