Revert "mm/memory.c: add vm_insert_pages()"
This reverts commit a561d7c.
sfrothwell committed Feb 26, 2020
1 parent: ebf9742 · commit: dd1e9e3
Showing 2 changed files with 2 additions and 111 deletions.
include/linux/mm.h: 2 changes (0 additions, 2 deletions)
@@ -2603,8 +2603,6 @@ struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
         unsigned long pfn, unsigned long size, pgprot_t);
 int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
-int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
-        struct page **pages, unsigned long *num);
 int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
         unsigned long num);
 int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
mm/memory.c: 111 changes (2 additions, 109 deletions)
@@ -1407,7 +1407,8 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
-static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
+pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
+        spinlock_t **ptl)
 {
         pgd_t *pgd;
         p4d_t *p4d;
@@ -1426,16 +1427,6 @@ static pmd_t *walk_to_pmd(struct mm_struct *mm, unsigned long addr)
                 return NULL;
 
         VM_BUG_ON(pmd_trans_huge(*pmd));
-        return pmd;
-}
-
-pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
-        spinlock_t **ptl)
-{
-        pmd_t *pmd = walk_to_pmd(mm, addr);
-
-        if (!pmd)
-                return NULL;
         return pte_alloc_map_lock(mm, pmd, addr, ptl);
 }
 
@@ -1460,15 +1451,6 @@ static int insert_page_into_pte_locked(struct mm_struct *mm, pte_t *pte,
         return 0;
 }
 
-static int insert_page_in_batch_locked(struct mm_struct *mm, pmd_t *pmd,
-        unsigned long addr, struct page *page, pgprot_t prot)
-{
-        const int err = validate_page_before_insert(page);
-
-        return err ? err : insert_page_into_pte_locked(
-                mm, pte_offset_map(pmd, addr), addr, page, prot);
-}
-
 /*
  * This is the old fallback for page remapping.
  *
@@ -1497,95 +1479,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
         return retval;
 }
 
-/* insert_pages() amortizes the cost of spinlock operations
- * when inserting pages in a loop.
- */
-static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
-        struct page **pages, unsigned long *num, pgprot_t prot)
-{
-        pmd_t *pmd = NULL;
-        spinlock_t *pte_lock = NULL;
-        struct mm_struct *const mm = vma->vm_mm;
-        unsigned long curr_page_idx = 0;
-        unsigned long remaining_pages_total = *num;
-        unsigned long pages_to_write_in_pmd;
-        int ret;
-more:
-        ret = -EFAULT;
-        pmd = walk_to_pmd(mm, addr);
-        if (!pmd)
-                goto out;
-
-        pages_to_write_in_pmd = min_t(unsigned long,
-                remaining_pages_total, PTRS_PER_PTE - pte_index(addr));
-
-        /* Allocate the PTE if necessary; takes PMD lock once only. */
-        ret = -ENOMEM;
-        if (pte_alloc(mm, pmd, addr))
-                goto out;
-        pte_lock = pte_lockptr(mm, pmd);
-
-        while (pages_to_write_in_pmd) {
-                int pte_idx = 0;
-                const int batch_size = min_t(int, pages_to_write_in_pmd, 8);
-
-                spin_lock(pte_lock);
-                for (; pte_idx < batch_size; ++pte_idx) {
-                        int err = insert_page_in_batch_locked(mm, pmd,
-                                addr, pages[curr_page_idx], prot);
-                        if (unlikely(err)) {
-                                spin_unlock(pte_lock);
-                                ret = err;
-                                remaining_pages_total -= pte_idx;
-                                goto out;
-                        }
-                        addr += PAGE_SIZE;
-                        ++curr_page_idx;
-                }
-                spin_unlock(pte_lock);
-                pages_to_write_in_pmd -= batch_size;
-                remaining_pages_total -= batch_size;
-        }
-        if (remaining_pages_total)
-                goto more;
-        ret = 0;
-out:
-        *num = remaining_pages_total;
-        return ret;
-}
-
-/**
- * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
- * @vma: user vma to map to
- * @addr: target start user address of these pages
- * @pages: source kernel pages
- * @num: in: number of pages to map. out: number of pages that were *not*
- * mapped. (0 means all pages were successfully mapped).
- *
- * Preferred over vm_insert_page() when inserting multiple pages.
- *
- * In case of error, we may have mapped a subset of the provided
- * pages. It is the caller's responsibility to account for this case.
- *
- * The same restrictions apply as in vm_insert_page().
- */
-int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
-        struct page **pages, unsigned long *num)
-{
-        const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
-
-        if (addr < vma->vm_start || end_addr >= vma->vm_end)
-                return -EFAULT;
-        if (!(vma->vm_flags & VM_MIXEDMAP)) {
-                BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
-                BUG_ON(vma->vm_flags & VM_PFNMAP);
-                vma->vm_flags |= VM_MIXEDMAP;
-        }
-        /* Defer page refcount checking till we're about to map that page. */
-        return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
-}
-EXPORT_SYMBOL(vm_insert_pages);
-
 /**
  * vm_insert_page - insert single page into user vma
  * @vma: user vma to map to
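
For context, the kernel-doc removed above describes vm_insert_pages() as the batched counterpart of vm_insert_page(): *num is the number of pages to insert on entry and the number left unmapped on return, and the same restrictions as vm_insert_page() apply. The sketch below is not part of this patch; it is only a minimal illustration of how a driver's ->mmap handler might have called the API, assuming a hypothetical preallocated page array my_dev_pages of length MY_DEV_NPAGES.

/* Hypothetical caller, for illustration only: my_dev_pages is assumed to be
 * a preallocated struct page *[] and MY_DEV_NPAGES its length.
 */
static int my_dev_mmap(struct file *file, struct vm_area_struct *vma)
{
        unsigned long num = MY_DEV_NPAGES;
        int err;

        if (vma_pages(vma) < num)
                return -EINVAL;

        /* ->mmap runs with mmap_sem held for write, which is what the
         * BUG_ON(down_read_trylock(...)) above checks before VM_MIXEDMAP is
         * first set on the vma.
         */
        err = vm_insert_pages(vma, vma->vm_start, my_dev_pages, &num);
        if (err)
                /* On failure, num reports how many pages were NOT mapped. */
                pr_warn("my_dev: %lu pages left unmapped (err %d)\n", num, err);
        return err;
}

Compared with calling vm_insert_page() once per page, the batched call takes the page-table spinlock once per group of up to eight pages, which is the amortization the removed insert_pages() comment refers to.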
