Skip to content

Commit

Permalink
x86/mm/pageattr: Add last levels of error path
Browse files · Browse the repository at this point in the history
We try to free the pagetable pages once we've unmapped our portion.

Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
  • Loading branch information
suryasaimadhu authored and Matt Fleming committed Nov 2, 2013
1 parent 0bb8aee commit 52a628f
Showing 1 changed file with 93 additions and 1 deletion.
94 changes: 93 additions & 1 deletion arch/x86/mm/pageattr.c
Original file line number Diff line number Diff line change
Expand Up @@ -666,7 +666,99 @@ static int split_large_page(pte_t *kpte, unsigned long address)
return 0;
}

#define unmap_pmd_range(pud, start, pre_end) do {} while (0)
/*
 * Release the page backing a PTE table, but only if every entry in
 * it is clear.  Returns true when the page was actually freed.
 */
static bool try_to_free_pte_page(pte_t *pte)
{
	int i = 0;

	while (i < PTRS_PER_PTE) {
		if (!pte_none(pte[i]))
			return false;
		i++;
	}

	free_page((unsigned long)pte);
	return true;
}

/*
 * Release the page backing a PMD table, but only if every entry in
 * it is clear.  Returns true when the page was actually freed.
 */
static bool try_to_free_pmd_page(pmd_t *pmd)
{
	int i = 0;

	while (i < PTRS_PER_PMD) {
		if (!pmd_none(pmd[i]))
			return false;
		i++;
	}

	free_page((unsigned long)pmd);
	return true;
}

/*
 * Clear every PTE covering [start, end) under @pmd, then attempt to
 * release the (now possibly empty) PTE page.  Returns true when the
 * page was freed and the PMD entry cleared.
 */
static bool unmap_pte_range(pmd_t *pmd, unsigned long start, unsigned long end)
{
	pte_t *pte = pte_offset_kernel(pmd, start);
	unsigned long addr;

	for (addr = start; addr < end; addr += PAGE_SIZE, pte++)
		set_pte(pte, __pte(0));

	if (!try_to_free_pte_page((pte_t *)pmd_page_vaddr(*pmd)))
		return false;

	pmd_clear(pmd);
	return true;
}

/*
 * Unmap [start, end) at 4K granularity and, if that emptied the PTE
 * page, also try to free the containing PMD page and clear @pud.
 */
static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
			      unsigned long start, unsigned long end)
{
	if (!unmap_pte_range(pmd, start, end))
		return;

	if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
		pud_clear(pud);
}

/*
 * Unmap the virtual range [start, end) at the PMD level, freeing any
 * page-table pages that become empty along the way.
 *
 * The range is handled in three pieces: an unaligned head (walked at
 * 4K granularity up to the next 2M boundary), full 2M chunks, and an
 * unaligned 4K tail.
 */
static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
{
	pmd_t *pmd = pmd_offset(pud, start);

	/*
	 * Not on a 2MB page boundary?
	 */
	if (start & (PMD_SIZE - 1)) {
		unsigned long next_page = (start + PMD_SIZE) & PMD_MASK;
		unsigned long pre_end = min_t(unsigned long, end, next_page);

		/* Head: clear 4K entries up to the 2M boundary (or end). */
		__unmap_pmd_range(pud, pmd, start, pre_end);

		start = pre_end;
		pmd++;
	}

	/*
	 * Try to unmap in 2M chunks.
	 */
	while (end - start >= PMD_SIZE) {
		if (pmd_large(*pmd))
			/* A whole large mapping: just clear the PMD entry. */
			pmd_clear(pmd);
		else
			__unmap_pmd_range(pud, pmd, start, start + PMD_SIZE);

		start += PMD_SIZE;
		pmd++;
	}

	/*
	 * 4K leftovers?
	 */
	if (start < end)
		return __unmap_pmd_range(pud, pmd, start, end);

	/*
	 * Try again to free the PMD page if haven't succeeded above.
	 */
	if (!pud_none(*pud))
		if (try_to_free_pmd_page((pmd_t *)pud_page_vaddr(*pud)))
			pud_clear(pud);
}

static void unmap_pud_range(pgd_t *pgd, unsigned long start, unsigned long end)
{
Expand Down

0 comments on commit 52a628f

Please sign in to comment.