Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 11 |
1 file changed, 9 insertions, 2 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 0c5a2b4e003d..47e98a5726c4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2116,11 +2116,11 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 		page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
 		if (!page)
 			goto out_uncharge_cgroup;
+		spin_lock(&hugetlb_lock);
 		if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
 			SetPagePrivate(page);
 			h->resv_huge_pages--;
 		}
-		spin_lock(&hugetlb_lock);
 		list_move(&page->lru, &h->hugepage_activelist);
 		/* Fall through */
 	}
@@ -4837,7 +4837,14 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	pud_clear(pud);
 	put_page(virt_to_page(ptep));
 	mm_dec_nr_pmds(mm);
-	*addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
+	/*
+	 * This update of passed address optimizes loops sequentially
+	 * processing addresses in increments of huge page size (PMD_SIZE
+	 * in this case). By clearing the pud, a PUD_SIZE area is unmapped.
+	 * Update address to the 'last page' in the cleared area so that
+	 * calling loop can move to first page past this area.
+	 */
+	*addr |= PUD_SIZE - PMD_SIZE;
 	return 1;
 }
 #define want_pmd_share()	(1)
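For context, a minimal stand-alone sketch of the arithmetic in the second hunk: it is not kernel code, and the PMD_SIZE/PUD_SIZE values are assumptions (typical x86-64 values of 2 MiB and 1 GiB). It mimics a caller that walks addresses in huge-page-sized steps, and shows how `*addr |= PUD_SIZE - PMD_SIZE` parks the address on the last PMD-sized page of the cleared PUD_SIZE area, so the loop's own increment lands on the first page past it.

/*
 * Hypothetical user-space illustration only; constants are assumed
 * (PMD_SIZE = 2 MiB, PUD_SIZE = 1 GiB), not taken from kernel headers.
 */
#include <stdio.h>

#define PMD_SIZE	(2UL << 20)	/* assumed huge page size */
#define PUD_SIZE	(1UL << 30)	/* assumed span covered by one pud */

int main(void)
{
	unsigned long start = 0x40000000UL;	/* PUD-aligned for simplicity */
	unsigned long end   = start + 2 * PUD_SIZE;
	unsigned long addr;

	for (addr = start; addr < end; addr += PMD_SIZE) {
		/*
		 * Pretend the unshare succeeded here: clearing the pud
		 * unmapped a whole PUD_SIZE area, so move addr to the
		 * last PMD-sized page inside that area...
		 */
		addr |= PUD_SIZE - PMD_SIZE;
		/*
		 * ...and the loop's "addr += PMD_SIZE" then resumes at
		 * the first page past the cleared area.
		 */
		printf("next iteration starts at 0x%lx\n", addr + PMD_SIZE);
	}
	return 0;
}

With the assumed constants this prints 0x80000000 and then 0xc0000000, i.e. the walk skips the remainder of each 1 GiB area instead of stepping through it 2 MiB at a time.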