Diffstat (limited to 'arch/x86/mm/pat/set_memory.c')
 -rw-r--r--  arch/x86/mm/pat/set_memory.c  |  95
 1 file changed, 31 insertions(+), 64 deletions(-)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index b4073fb45..80c9037ff 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -619,8 +619,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
* Validate strict W^X semantics.
*/
static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
- unsigned long pfn, unsigned long npg,
- bool nx, bool rw)
+ unsigned long pfn, unsigned long npg)
{
unsigned long end;
@@ -642,10 +641,6 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
return new;
- /* Non-leaf translation entries can disable writing or execution. */
- if (!rw || nx)
- return new;
-
end = start + npg * PAGE_SIZE - 1;
WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
(unsigned long long)pgprot_val(old),
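
The W^X test above relies on a single mask compare: an entry is both writable and executable exactly when _PAGE_RW is set and _PAGE_NX is clear. A minimal standalone sketch of the predicate (bit positions mirror x86's pgtable_types.h; the helper name is illustrative):

	#include <stdbool.h>
	#include <stdint.h>

	#define _PAGE_RW  (1ULL << 1)   /* writable */
	#define _PAGE_NX  (1ULL << 63)  /* no-execute */

	/* True only for the one combination that violates W^X:
	 * RW set and NX clear, i.e. writable and executable. */
	static bool is_wx_violation(uint64_t prot)
	{
		return (prot & (_PAGE_RW | _PAGE_NX)) == _PAGE_RW;
	}

verify_rwx() returns early with the unchanged pgprot whenever this predicate is false; only a true W+X result reaches the WARN_ONCE().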
@@ -662,37 +657,28 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
/*
* Lookup the page table entry for a virtual address in a specific pgd.
- * Return a pointer to the entry, the level of the mapping, and the effective
- * NX and RW bits of all page table levels.
+ * Return a pointer to the entry and the level of the mapping.
*/
-pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
- unsigned int *level, bool *nx, bool *rw)
+pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+ unsigned int *level)
{
p4d_t *p4d;
pud_t *pud;
pmd_t *pmd;
*level = PG_LEVEL_NONE;
- *nx = false;
- *rw = true;
if (pgd_none(*pgd))
return NULL;
- *nx |= pgd_flags(*pgd) & _PAGE_NX;
- *rw &= pgd_flags(*pgd) & _PAGE_RW;
-
p4d = p4d_offset(pgd, address);
if (p4d_none(*p4d))
return NULL;
*level = PG_LEVEL_512G;
- if (p4d_large(*p4d) || !p4d_present(*p4d))
+ if (p4d_leaf(*p4d) || !p4d_present(*p4d))
return (pte_t *)p4d;
- *nx |= p4d_flags(*p4d) & _PAGE_NX;
- *rw &= p4d_flags(*p4d) & _PAGE_RW;
-
pud = pud_offset(p4d, address);
if (pud_none(*pud))
return NULL;
@@ -701,38 +687,20 @@ pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
if (pud_leaf(*pud) || !pud_present(*pud))
return (pte_t *)pud;
- *nx |= pud_flags(*pud) & _PAGE_NX;
- *rw &= pud_flags(*pud) & _PAGE_RW;
-
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
return NULL;
*level = PG_LEVEL_2M;
- if (pmd_large(*pmd) || !pmd_present(*pmd))
+ if (pmd_leaf(*pmd) || !pmd_present(*pmd))
return (pte_t *)pmd;
- *nx |= pmd_flags(*pmd) & _PAGE_NX;
- *rw &= pmd_flags(*pmd) & _PAGE_RW;
-
*level = PG_LEVEL_4K;
return pte_offset_kernel(pmd, address);
}
/*
- * Lookup the page table entry for a virtual address in a specific pgd.
- * Return a pointer to the entry and the level of the mapping.
- */
-pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
- unsigned int *level)
-{
- bool nx, rw;
-
- return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
-}
-
-/*
* Lookup the page table entry for a virtual address. Return a pointer
* to the entry and the level of the mapping.
*
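
The walk above stops at the first leaf (or non-present) entry and reports the level it stopped at; callers turn that level into a mapping size. A hedged usage sketch against the in-kernel API shown in this hunk (assumes a mapped kernel address; the printout is illustrative):

	unsigned int level;
	pte_t *pte = lookup_address(addr, &level);

	if (pte && pte_present(*pte))
		pr_info("%#lx mapped at level %u (%lu-byte pages)\n",
			addr, level, page_level_size(level));

page_level_size() expands PG_LEVEL_4K/2M/1G into 4 KiB, 2 MiB or 1 GiB respectively, which is why the walk records *level before each leaf check.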
@@ -747,16 +715,13 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
EXPORT_SYMBOL_GPL(lookup_address);
static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
- unsigned int *level, bool *nx, bool *rw)
+ unsigned int *level)
{
- pgd_t *pgd;
+ if (cpa->pgd)
+ return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
+ address, level);
- if (!cpa->pgd)
- pgd = pgd_offset_k(address);
- else
- pgd = cpa->pgd + pgd_index(address);
-
- return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
+ return lookup_address(address, level);
}
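
_lookup_address_cpa() keys off whether the cpa_data carries its own top level: with cpa->pgd set (e.g. when patching a non-kernel page table) the walk starts from that table's slot for the address, otherwise it falls back to the kernel's own mappings via lookup_address(). The slot arithmetic is plain index math; an illustrative recomputation for 4-level x86-64 (helper name is hypothetical):

	/* pgd_index() picks the top-level slot: bits 47:39 of the address
	 * on 4-level paging (PGDIR_SHIFT == 39, 512 entries per table). */
	static inline unsigned long demo_pgd_index(unsigned long address)
	{
		return (address >> 39) & 0x1ff;
	}

So cpa->pgd + pgd_index(address) is simply the address of the relevant top-level entry inside the supplied table.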
/*
@@ -774,7 +739,7 @@ pmd_t *lookup_pmd_address(unsigned long address)
return NULL;
p4d = p4d_offset(pgd, address);
- if (p4d_none(*p4d) || p4d_large(*p4d) || !p4d_present(*p4d))
+ if (p4d_none(*p4d) || p4d_leaf(*p4d) || !p4d_present(*p4d))
return NULL;
pud = pud_offset(p4d, address);
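
The pXd_leaf() helpers used throughout these walks report whether an entry maps memory directly at that level instead of pointing at a lower table. On x86 that is the PSE bit for PMD/PUD entries; a schematic check on a raw entry value (simplified, ignoring the present bit):

	/* Bit 7 (_PAGE_PSE) marks a huge-page leaf entry. */
	static inline bool demo_entry_is_leaf(unsigned long entry)
	{
		return entry & (1UL << 7);
	}

The p4d_large()/pmd_large() spellings on the removed lines are the older names for the same predicates; the pXd_leaf() forms are the generic ones.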
@@ -884,13 +849,12 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
pgprot_t old_prot, new_prot, req_prot, chk_prot;
pte_t new_pte, *tmp;
enum pg_level level;
- bool nx, rw;
/*
* Check for races, another CPU might have split this page
* up already:
*/
- tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ tmp = _lookup_address_cpa(cpa, address, &level);
if (tmp != kpte)
return 1;
@@ -1001,8 +965,7 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
psize, CPA_DETECT);
- new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
- nx, rw);
+ new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
/*
* If there is a conflict, split the large page.
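
A "conflict" here means the caller's requested protection cannot hold uniformly across the whole 2M/1G mapping, so the mapping must be shattered into 512 smaller entries first. A hedged usage example that would trigger such a split (in-kernel API; error handling elided):

	/* Making one 4K page read-only inside a 2M kernel mapping forces
	 * CPA to split that 2M page before changing the single PTE. */
	int ret = set_memory_ro((unsigned long)addr, 1);

Everything outside the affected 4K page keeps the old protections via the 511 sibling entries the split creates.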
@@ -1083,7 +1046,6 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
pte_t *pbase = (pte_t *)page_address(base);
unsigned int i, level;
pgprot_t ref_prot;
- bool nx, rw;
pte_t *tmp;
spin_lock(&pgd_lock);
@@ -1091,7 +1053,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
* Check for races, another CPU might have split this page
* up for us already:
*/
- tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ tmp = _lookup_address_cpa(cpa, address, &level);
if (tmp != kpte) {
spin_unlock(&pgd_lock);
return 1;
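
Both race checks in this file follow the same shape: with pgd_lock held, re-walk the address, and if the returned entry no longer matches the one the caller captured, another CPU has split the page in the meantime. Schematically (condensed from the two hunks above):

	spin_lock(&pgd_lock);
	tmp = _lookup_address_cpa(cpa, address, &level);
	if (tmp != kpte) {
		spin_unlock(&pgd_lock);
		return 1;	/* entry changed under us */
	}

Returning 1 rather than an error is deliberate: the caller simply retries against the freshly split mapping.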
@@ -1271,7 +1233,7 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
* Try to unmap in 2M chunks.
*/
while (end - start >= PMD_SIZE) {
- if (pmd_large(*pmd))
+ if (pmd_leaf(*pmd))
pmd_clear(pmd);
else
__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);
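
With pmd_leaf(), a whole 2M chunk disappears with a single pmd_clear(); only non-leaf entries need the descent into __unmap_pmd_range(). A schematic of the chunked walk (the loop-advance lines are assumed from the surrounding function, which this hunk elides):

	while (end - start >= PMD_SIZE) {
		if (pmd_leaf(*pmd))
			pmd_clear(pmd);		/* drop the 2M mapping at once */
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;		/* next 2M-aligned chunk */
		pmd++;
	}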
@@ -1632,11 +1594,10 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
int do_split, err;
unsigned int level;
pte_t *kpte, old_pte;
- bool nx, rw;
address = __cpa_addr(cpa, cpa->curpage);
repeat:
- kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
+ kpte = _lookup_address_cpa(cpa, address, &level);
if (!kpte)
return __cpa_process_fault(cpa, address, primary);
@@ -1658,8 +1619,7 @@ repeat:
new_prot = static_protections(new_prot, address, pfn, 1, 0,
CPA_PROTECT);
- new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
- nx, rw);
+ new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
new_prot = pgprot_clear_protnone_bits(new_prot);
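
The 4K path thus runs the same protection pipeline as the large-page path: apply the caller's masks, clamp with static_protections(), then let verify_rwx() veto any W+X outcome. A condensed sketch of the mask step that precedes these calls in __change_page_attr() (shown for orientation; not part of this hunk):

	/* Clear the caller's mask_clr bits, then set its mask_set bits. */
	pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

Only after this composition do the two validation passes above run, so static protections always win over the caller's request.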
@@ -2197,7 +2157,7 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
/* Notify hypervisor that we are about to set/clr encryption attribute. */
if (!x86_platform.guest.enc_status_change_prepare(addr, numpages, enc))
- return -EIO;
+ goto vmm_fail;
ret = __change_page_attr_set_clr(&cpa, 1);
@@ -2210,13 +2170,20 @@ static int __set_memory_enc_pgtable(unsigned long addr, int numpages, bool enc)
*/
cpa_flush(&cpa, 0);
+ if (ret)
+ return ret;
+
/* Notify hypervisor that we have successfully set/clr encryption attribute. */
- if (!ret) {
- if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
- ret = -EIO;
- }
+ if (!x86_platform.guest.enc_status_change_finish(addr, numpages, enc))
+ goto vmm_fail;
- return ret;
+ return 0;
+
+vmm_fail:
+ WARN_ONCE(1, "CPA VMM failure to convert memory (addr=%p, numpages=%d) to %s.\n",
+ (void *)addr, numpages, enc ? "private" : "shared");
+
+ return -EIO;
}
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool enc)
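
The restructure above converts the nested success checks into the kernel's usual early-return-plus-error-label shape: both hypervisor notification failures now funnel through a single vmm_fail label that warns once and reports -EIO, while a genuine CPA failure still propagates its own error code after the mandatory flush. The resulting control flow, condensed (callback names abbreviated from x86_platform.guest):

	if (!prepare(addr, numpages, enc))	/* hypervisor refused up front */
		goto vmm_fail;

	ret = __change_page_attr_set_clr(&cpa, 1);
	cpa_flush(&cpa, 0);			/* flush even on failure */

	if (ret)
		return ret;			/* CPA error: not a VMM failure */

	if (!finish(addr, numpages, enc))	/* hypervisor failed to finish */
		goto vmm_fail;

	return 0;

vmm_fail:
	WARN_ONCE(...);
	return -EIO;

Keeping cpa_flush() ahead of the ret check matters because the page tables may have been partially modified before the failure.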