author		Punit Agrawal <punit.agrawal@arm.com>		2017-05-26 11:31:57 +1000
committer	Stephen Rothwell <sfr@canb.auug.org.au>		2017-05-26 11:31:57 +1000
commit		b6ebe9b384470010fdc0a911bfc48f07cf64c4a5
tree		44ce98fa11a3e7152671c9c5b49f0756fc674006
parent		9ac5676917a074612bf3d40ad73ee85c164b171d
mm/hugetlb: introduce set_huge_swap_pte_at() helper
set_huge_pte_at(), an architecture callback to populate hugepage ptes,
does not provide the range of virtual memory that is targeted.  This
leads to ambiguity when dealing with swap entries on architectures that
support hugepages consisting of contiguous ptes.

Fix the problem by introducing an overridable helper that is called
when populating the page tables with swap entries.  The size of the
targeted region is passed to the helper so it can determine the number
of entries to be updated.

Provide a default implementation that maintains the current behaviour.

Link: http://lkml.kernel.org/r/20170522133604.11392-6-punit.agrawal@arm.com
Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Acked-by: Steve Capper <steve.capper@arm.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
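[Editor's illustration, not part of this patch] To see why the size
argument matters, consider an architecture that builds hugepages out of
contiguous ptes, such as arm64 with its contiguous hint.  Such an
architecture could override the weak default along the lines of the
sketch below; num_contig_ptes() is an assumed helper name standing in
for whatever translates the region size into a number of entries, and
only the shape of the override is meant to be taken literally:

void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
			  pte_t *ptep, pte_t pte, unsigned long sz)
{
	int i, ncontig;
	size_t pgsize;

	/*
	 * Assumed helper: translate the size of the mapped region
	 * into the number of contiguous page-table entries and the
	 * page size of each entry.
	 */
	ncontig = num_contig_ptes(sz, &pgsize);

	/*
	 * A swap pte carries no output address, so there is nothing
	 * to step; write the same swap pte into every entry of the
	 * contiguous range.
	 */
	for (i = 0; i < ncontig; i++, ptep++)
		set_pte(ptep, pte);
}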
 include/linux/hugetlb.h |  3 +++
 mm/hugetlb.c            | 14 +++++++++++---
 2 files changed, 14 insertions(+), 3 deletions(-)
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 31e665fbcf76..83d46b2a7cd2 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -158,6 +158,9 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 		unsigned long address, unsigned long end, pgprot_t newprot);
 
 bool is_hugetlb_entry_migration(pte_t pte);
+
+void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
+			  pte_t *ptep, pte_t pte, unsigned long sz);
 
 #else /* !CONFIG_HUGETLB_PAGE */
 static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3dbe3e257975..160ab5d6949a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3224,6 +3224,12 @@ static int is_hugetlb_entry_hwpoisoned(pte_t pte)
 		return 0;
 }
 
+void __weak set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
+				 pte_t *ptep, pte_t pte, unsigned long sz)
+{
+	set_huge_pte_at(mm, addr, ptep, pte);
+}
+
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			    struct vm_area_struct *vma)
 {
@@ -3276,9 +3282,10 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 				 */
 				make_migration_entry_read(&swp_entry);
 				entry = swp_entry_to_pte(swp_entry);
-				set_huge_pte_at(src, addr, src_pte, entry);
+				set_huge_swap_pte_at(src, addr, src_pte,
+						     entry, sz);
 			}
-			set_huge_pte_at(dst, addr, dst_pte, entry);
+			set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
 		} else {
 			if (cow) {
 				huge_ptep_set_wrprotect(src, addr, src_pte);
@@ -4295,7 +4302,8 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 
 				make_migration_entry_read(&entry);
 				newpte = swp_entry_to_pte(entry);
-				set_huge_pte_at(mm, address, ptep, newpte);
+				set_huge_swap_pte_at(mm, address, ptep,
+						     newpte, huge_page_size(h));
 				pages++;
 			}
 			spin_unlock(ptl);