mm: hugetlb: cleanup using page_huge_active()
Now we have easy access to hugepage activeness, so the existing helpers used
to derive that information can be cleaned up.

[akpm@linux-foundation.org: s/PageHugeActive/page_huge_active/]
Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Hugh Dickins <hughd@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Naoya Horiguchi authored and torvalds committed Apr 15, 2015
1 parent bcc5422 commit 7e1f049
Showing 4 changed files with 13 additions and 40 deletions.
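
For context, the activeness test this cleanup relies on comes from the parent
commit bcc5422, which introduced page_huge_active(). The sketch below is
recalled from that commit rather than shown in this diff, so treat the exact
details as something to verify against bcc5422: the idea is that the
PagePrivate bit of the first tail page records whether a hugepage is on an
hstate's active list, so free-list pages and tail pages naturally report false.

/*
 * Sketch only -- not part of this diff; recalled from the parent commit
 * bcc5422 (verify there).  The "active" state lives in the PagePrivate
 * bit of the first tail page of the hugepage.
 */
bool page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHuge(page), page);
	return PageHead(page) && PagePrivate(&page[1]);
}

/* set when the hugepage is put on the hstate's active list */
static void set_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	SetPagePrivate(&page[1]);
}

static void clear_page_huge_active(struct page *page)
{
	VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
	ClearPagePrivate(&page[1]);
}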
2 changes: 0 additions & 2 deletions include/linux/hugetlb.h
@@ -84,7 +84,6 @@ void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed);
 int dequeue_hwpoisoned_huge_page(struct page *page);
 bool isolate_huge_page(struct page *page, struct list_head *list);
 void putback_active_hugepage(struct page *page);
-bool is_hugepage_active(struct page *page);
 void free_huge_page(struct page *page);
 
 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
@@ -152,7 +151,6 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list)
 	return false;
 }
 #define putback_active_hugepage(p) do {} while (0)
-#define is_hugepage_active(x) false
 
 static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 			unsigned long address, unsigned long end, pgprot_t newprot)
7 changes: 7 additions & 0 deletions include/linux/page-flags.h
@@ -470,11 +470,18 @@ static inline void ClearPageCompound(struct page *page)
 #ifdef CONFIG_HUGETLB_PAGE
 int PageHuge(struct page *page);
 int PageHeadHuge(struct page *page);
+bool page_huge_active(struct page *page);
 #else
 TESTPAGEFLAG_FALSE(Huge)
 TESTPAGEFLAG_FALSE(HeadHuge)
+
+static inline bool page_huge_active(struct page *page)
+{
+	return 0;
+}
 #endif
 
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
  * PageHuge() only returns true for hugetlbfs pages, but not for
42 changes: 5 additions & 37 deletions mm/hugetlb.c
Expand Up @@ -3896,20 +3896,6 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,

#ifdef CONFIG_MEMORY_FAILURE

/* Should be called in hugetlb_lock */
static int is_hugepage_on_freelist(struct page *hpage)
{
struct page *page;
struct page *tmp;
struct hstate *h = page_hstate(hpage);
int nid = page_to_nid(hpage);

list_for_each_entry_safe(page, tmp, &h->hugepage_freelists[nid], lru)
if (page == hpage)
return 1;
return 0;
}

/*
* This function is called from memory failure code.
* Assume the caller holds page lock of the head page.
@@ -3921,7 +3907,11 @@ int dequeue_hwpoisoned_huge_page(struct page *hpage)
 	int ret = -EBUSY;
 
 	spin_lock(&hugetlb_lock);
-	if (is_hugepage_on_freelist(hpage)) {
+	/*
+	 * Just checking !page_huge_active is not enough, because that could be
+	 * an isolated/hwpoisoned hugepage (which have >0 refcount).
+	 */
+	if (!page_huge_active(hpage) && !page_count(hpage)) {
 		/*
 		 * Hwpoisoned hugepage isn't linked to activelist or freelist,
 		 * but dangling hpage->lru can trigger list-debug warnings
@@ -3965,25 +3955,3 @@ void putback_active_hugepage(struct page *page)
 	spin_unlock(&hugetlb_lock);
 	put_page(page);
 }
-
-bool is_hugepage_active(struct page *page)
-{
-	VM_BUG_ON_PAGE(!PageHuge(page), page);
-	/*
-	 * This function can be called for a tail page because the caller,
-	 * scan_movable_pages, scans through a given pfn-range which typically
-	 * covers one memory block. In systems using gigantic hugepage (1GB
-	 * for x86_64,) a hugepage is larger than a memory block, and we don't
-	 * support migrating such large hugepages for now, so return false
-	 * when called for tail pages.
-	 */
-	if (PageTail(page))
-		return false;
-	/*
-	 * Refcount of a hwpoisoned hugepages is 1, but they are not active,
-	 * so we should return false for them.
-	 */
-	if (unlikely(PageHWPoison(page)))
-		return false;
-	return page_count(page) > 0;
-}
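
The replacement condition in dequeue_hwpoisoned_huge_page() deserves a second
look: !page_huge_active() alone would also match isolated or already-hwpoisoned
hugepages, which still hold a reference, so the refcount check is what narrows
the test down to pages that are genuinely on a free list. The toy userspace
model below (invented names, not kernel code) simply restates that reasoning as
executable checks.

#include <assert.h>
#include <stdbool.h>

/* Toy model (hypothetical names) of the states the new check must tell apart. */
struct toy_hugepage {
	bool active;   /* on the hstate active list, i.e. in use       */
	int refcount;  /* free-list hugepages hold no reference        */
};

/* Mirrors the new condition: only genuinely free hugepages pass. */
static bool on_freelist(const struct toy_hugepage *hp)
{
	return !hp->active && hp->refcount == 0;
}

int main(void)
{
	struct toy_hugepage free_page = { .active = false, .refcount = 0 };
	struct toy_hugepage in_use    = { .active = true,  .refcount = 1 };
	/* isolated or hwpoisoned: no longer active, but still referenced */
	struct toy_hugepage isolated  = { .active = false, .refcount = 1 };

	assert(on_freelist(&free_page));
	assert(!on_freelist(&in_use));
	assert(!on_freelist(&isolated));  /* why !active alone is not enough */
	return 0;
}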
2 changes: 1 addition & 1 deletion mm/memory_hotplug.c
@@ -1373,7 +1373,7 @@ static unsigned long scan_movable_pages(unsigned long start, unsigned long end)
 		if (PageLRU(page))
 			return pfn;
 		if (PageHuge(page)) {
-			if (is_hugepage_active(page))
+			if (page_huge_active(page))
 				return pfn;
 			else
 				pfn = round_up(pfn + 1,
