Commit 3fba69a5 authored by Johannes Weiner's avatar Johannes Weiner Committed by Linus Torvalds
Browse files

mm: memcontrol: drop @compound parameter from memcg charging API



The memcg charging API carries a boolean @compound parameter that tells
whether the page we're dealing with is a hugepage.
mem_cgroup_commit_charge() has another boolean @lrucare that indicates
whether the page needs LRU locking or not while charging.  The majority of
callsites know those parameters at compile time, which results in a lot of
naked "false, false" argument lists.  This makes for cryptic code and is a
breeding ground for subtle mistakes.

Thankfully, the huge page state can be inferred from the page itself and
doesn't need to be passed along.  This is safe because charging completes
before the page is published, i.e. before anybody could get a reference
and split it.

Simplify the callsites by removing @compound, and let memcg infer the
state by using hpage_nr_pages() unconditionally.  That function does
PageTransHuge() to identify huge pages, which also helpfully asserts that
nobody passes in tail pages by accident.

The following patches will introduce a new charging API, best not to carry
over unnecessary weight.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Alex Shi <alex.shi@linux.alibaba.com>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-4-hannes@cmpxchg.org


Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent abb242f5
Loading
Loading
Loading
Loading
+8 −14
Original line number Diff line number Diff line
@@ -359,15 +359,12 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
						struct mem_cgroup *memcg);

int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
			  gfp_t gfp_mask, struct mem_cgroup **memcgp,
			  bool compound);
			  gfp_t gfp_mask, struct mem_cgroup **memcgp);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
			      bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
		bool compound);
			      bool lrucare);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg);
void mem_cgroup_uncharge(struct page *page);
void mem_cgroup_uncharge_list(struct list_head *page_list);

@@ -849,8 +846,7 @@ static inline enum mem_cgroup_protection mem_cgroup_protected(

static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
					gfp_t gfp_mask,
					struct mem_cgroup **memcgp,
					bool compound)
					struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	return 0;
@@ -859,8 +855,7 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
static inline int mem_cgroup_try_charge_delay(struct page *page,
					      struct mm_struct *mm,
					      gfp_t gfp_mask,
					      struct mem_cgroup **memcgp,
					      bool compound)
					      struct mem_cgroup **memcgp)
{
	*memcgp = NULL;
	return 0;
@@ -868,13 +863,12 @@ static inline int mem_cgroup_try_charge_delay(struct page *page,

static inline void mem_cgroup_commit_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool lrucare, bool compound)
					    bool lrucare)
{
}

static inline void mem_cgroup_cancel_charge(struct page *page,
					    struct mem_cgroup *memcg,
					    bool compound)
					    struct mem_cgroup *memcg)
{
}

+3 −3
Original line number Diff line number Diff line
@@ -169,7 +169,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,

	if (new_page) {
		err = mem_cgroup_try_charge(new_page, vma->vm_mm, GFP_KERNEL,
					    &memcg, false);
					    &memcg);
		if (err)
			return err;
	}
@@ -181,7 +181,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
	err = -EAGAIN;
	if (!page_vma_mapped_walk(&pvmw)) {
		if (new_page)
			mem_cgroup_cancel_charge(new_page, memcg, false);
			mem_cgroup_cancel_charge(new_page, memcg);
		goto unlock;
	}
	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
@@ -189,7 +189,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
	if (new_page) {
		get_page(new_page);
		page_add_new_anon_rmap(new_page, vma, addr, false);
		mem_cgroup_commit_charge(new_page, memcg, false, false);
		mem_cgroup_commit_charge(new_page, memcg, false);
		lru_cache_add_active_or_unevictable(new_page, vma);
	} else
		/* no new page, just dec_mm_counter for old_page */
+3 −3
Original line number Diff line number Diff line
@@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page,

	if (!huge) {
		error = mem_cgroup_try_charge(page, current->mm,
					      gfp_mask, &memcg, false);
					      gfp_mask, &memcg);
		if (error)
			return error;
	}
@@ -878,14 +878,14 @@ unlock:
		goto error;

	if (!huge)
		mem_cgroup_commit_charge(page, memcg, false, false);
		mem_cgroup_commit_charge(page, memcg, false);
	trace_mm_filemap_add_to_page_cache(page);
	return 0;
error:
	page->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
	if (!huge)
		mem_cgroup_cancel_charge(page, memcg, false);
		mem_cgroup_cancel_charge(page, memcg);
	put_page(page);
	return xas_error(&xas);
}
+4 −4
Original line number Diff line number Diff line
@@ -594,7 +594,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,

	VM_BUG_ON_PAGE(!PageCompound(page), page);

	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg, true)) {
	if (mem_cgroup_try_charge_delay(page, vma->vm_mm, gfp, &memcg)) {
		put_page(page);
		count_vm_event(THP_FAULT_FALLBACK);
		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
@@ -630,7 +630,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
			vm_fault_t ret2;

			spin_unlock(vmf->ptl);
			mem_cgroup_cancel_charge(page, memcg, true);
			mem_cgroup_cancel_charge(page, memcg);
			put_page(page);
			pte_free(vma->vm_mm, pgtable);
			ret2 = handle_userfault(vmf, VM_UFFD_MISSING);
@@ -641,7 +641,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
		entry = mk_huge_pmd(page, vma->vm_page_prot);
		entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
		page_add_new_anon_rmap(page, vma, haddr, true);
		mem_cgroup_commit_charge(page, memcg, false, true);
		mem_cgroup_commit_charge(page, memcg, false);
		lru_cache_add_active_or_unevictable(page, vma);
		pgtable_trans_huge_deposit(vma->vm_mm, vmf->pmd, pgtable);
		set_pmd_at(vma->vm_mm, haddr, vmf->pmd, entry);
@@ -658,7 +658,7 @@ unlock_release:
release:
	if (pgtable)
		pte_free(vma->vm_mm, pgtable);
	mem_cgroup_cancel_charge(page, memcg, true);
	mem_cgroup_cancel_charge(page, memcg);
	put_page(page);
	return ret;

+10 −10
Original line number Diff line number Diff line
@@ -1060,7 +1060,7 @@ static void collapse_huge_page(struct mm_struct *mm,
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}
@@ -1068,7 +1068,7 @@ static void collapse_huge_page(struct mm_struct *mm,
	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		mem_cgroup_cancel_charge(new_page, memcg);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}
@@ -1076,7 +1076,7 @@ static void collapse_huge_page(struct mm_struct *mm,
	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		mem_cgroup_cancel_charge(new_page, memcg);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}
@@ -1088,7 +1088,7 @@ static void collapse_huge_page(struct mm_struct *mm,
	 */
	if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
						     pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		mem_cgroup_cancel_charge(new_page, memcg);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}
@@ -1176,7 +1176,7 @@ static void collapse_huge_page(struct mm_struct *mm,
	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	mem_cgroup_commit_charge(new_page, memcg, false);
	count_memcg_events(memcg, THP_COLLAPSE_ALLOC, 1);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
@@ -1194,7 +1194,7 @@ out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	mem_cgroup_cancel_charge(new_page, memcg);
	goto out_up_write;
}

@@ -1637,7 +1637,7 @@ static void collapse_file(struct mm_struct *mm,
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}
@@ -1650,7 +1650,7 @@ static void collapse_file(struct mm_struct *mm,
			break;
		xas_unlock_irq(&xas);
		if (!xas_nomem(&xas, GFP_KERNEL)) {
			mem_cgroup_cancel_charge(new_page, memcg, true);
			mem_cgroup_cancel_charge(new_page, memcg);
			result = SCAN_FAIL;
			goto out;
		}
@@ -1887,7 +1887,7 @@ xa_unlocked:

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		mem_cgroup_commit_charge(new_page, memcg, false);

		if (is_shmem) {
			set_page_dirty(new_page);
@@ -1942,7 +1942,7 @@ xa_unlocked:
		VM_BUG_ON(nr_none);
		xas_unlock_irq(&xas);

		mem_cgroup_cancel_charge(new_page, memcg, true);
		mem_cgroup_cancel_charge(new_page, memcg);
		new_page->mapping = NULL;
	}

Loading