From 1f2d8b4421bd0da2c97fb8bad5cc85fc929fef64 Mon Sep 17 00:00:00 2001
From: Kefeng Wang
Date: Fri, 12 Apr 2024 14:47:50 +0800
Subject: mm: move mm counter updating out of set_pte_range()

Patch series "mm: batch mm counter updating in filemap_map_pages()", v3.

Let's batch mm counter updating to accelerate filemap_map_pages().

This patch (of 2):

In order to support batch mm counter updating in filemap_map_pages(),
move the mm counter update out of set_pte_range().  The folios mapped
here are always file-backed, since they come from the page cache; the
other caller, finish_fault(), distinguishes file-backed from anonymous
folios by checking vmf->flags and vma->vm_flags.

Link: https://lkml.kernel.org/r/20240412064751.119015-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240412064751.119015-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang
Cc: Matthew Wilcox (Oracle)
Signed-off-by: Andrew Morton
---
 mm/filemap.c | 4 ++++
 1 file changed, 4 insertions(+)

(limited to 'mm/filemap.c')

diff --git a/mm/filemap.c b/mm/filemap.c
index 21e70434d931..5a518c5075fb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -3539,6 +3539,8 @@ static vm_fault_t filemap_map_folio_range(struct vm_fault *vmf,
 skip:
 		if (count) {
 			set_pte_range(vmf, folio, page, count, addr);
+			add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio),
+				       count);
 			folio_ref_add(folio, count);
 			if (in_range(vmf->address, addr, count * PAGE_SIZE))
 				ret = VM_FAULT_NOPAGE;
@@ -3553,6 +3555,7 @@ skip:
 
 	if (count) {
 		set_pte_range(vmf, folio, page, count, addr);
+		add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), count);
 		folio_ref_add(folio, count);
 		if (in_range(vmf->address, addr, count * PAGE_SIZE))
 			ret = VM_FAULT_NOPAGE;
@@ -3589,6 +3592,7 @@ static vm_fault_t filemap_map_order0_folio(struct vm_fault *vmf,
 	ret = VM_FAULT_NOPAGE;
 
 	set_pte_range(vmf, folio, page, 1, addr);
+	add_mm_counter(vmf->vma->vm_mm, mm_counter_file(folio), 1);
 	folio_ref_inc(folio);
 
 	return ret;
--
cgit v1.2.3
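
For illustration: once set_pte_range() stops updating the counter itself,
every caller must account for the PTEs it installs.  The hunks above handle
the page-cache callers; the commit message's reference to vmf->flags and
vma->vm_flags concerns the other caller, finish_fault(), whose change is not
shown here (this diff is limited to mm/filemap.c).  A minimal sketch of that
distinction, with the is_cow naming being illustrative rather than quoted
from the actual change:

	struct vm_area_struct *vma = vmf->vma;
	/*
	 * A write fault on a private file mapping installs an anonymous
	 * CoW copy, which must be charged to MM_ANONPAGES rather than to
	 * the file-backed counter.
	 */
	bool is_cow = (vmf->flags & FAULT_FLAG_WRITE) &&
		      !(vma->vm_flags & VM_SHARED);
	int type = is_cow ? MM_ANONPAGES : mm_counter_file(folio);

	set_pte_range(vmf, folio, page, 1, vmf->address);
	/* set_pte_range() no longer bumps the counter, so do it here. */
	add_mm_counter(vma->vm_mm, type, 1);

With the update hoisted out of set_pte_range(), the follow-up patch in the
series can accumulate the count across the whole filemap_map_pages() loop
and call add_mm_counter() once, instead of once per set_pte_range() call.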