Commit d9eb1ea2 authored by Johannes Weiner, committed by Linus Torvalds

mm: memcontrol: delete unused lrucare handling



Swapin faults were the last event to charge pages after they had already
been put on the LRU list.  Now that we charge directly on swapin, the
lrucare portion of the charge code is unused.
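
For illustration only (not part of the patch), a minimal sketch of what a caller looks like under the simplified API: the page is charged before it is added to the LRU, so no lrucare flag is needed. The helper name and surrounding flow below are hypothetical.

#include <linux/memcontrol.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only -- not from this patch.  A caller charges a
 * freshly allocated page against an mm before the page is put on the LRU,
 * so mem_cgroup_charge() takes just the page, the mm, and the gfp mask.
 */
static int example_charge_new_page(struct page *page, struct mm_struct *mm,
				   gfp_t gfp)
{
	int err;

	/* Previously: mem_cgroup_charge(page, mm, gfp, false); */
	err = mem_cgroup_charge(page, mm, gfp);
	if (err)
		return err;	/* charge failed; caller frees the page */

	/* ... map the page or add it to the page cache, then the LRU ... */
	return 0;
}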

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Roman Gushchin <guro@fb.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Shakeel Butt <shakeelb@google.com>
Link: http://lkml.kernel.org/r/20200508183105.225460-19-hannes@cmpxchg.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0a27cae1
include/linux/memcontrol.h  +2 −3
@@ -355,8 +355,7 @@ static inline unsigned long mem_cgroup_protection(struct mem_cgroup *memcg,
 enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
 						struct mem_cgroup *memcg);
 
-int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask,
-		      bool lrucare);
+int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask);
 
 void mem_cgroup_uncharge(struct page *page);
 void mem_cgroup_uncharge_list(struct list_head *page_list);
@@ -839,7 +838,7 @@ static inline enum mem_cgroup_protection mem_cgroup_protected(
 }
 
 static inline int mem_cgroup_charge(struct page *page, struct mm_struct *mm,
-				    gfp_t gfp_mask, bool lrucare)
+				    gfp_t gfp_mask)
 {
 	return 0;
 }
kernel/events/uprobes.c  +1 −2
@@ -167,8 +167,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 				addr + PAGE_SIZE);
 
 	if (new_page) {
-		err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL,
-					false);
+		err = mem_cgroup_charge(new_page, vma->vm_mm, GFP_KERNEL);
 		if (err)
 			return err;
 	}
mm/filemap.c  +1 −1
@@ -845,7 +845,7 @@ static int __add_to_page_cache_locked(struct page *page,
 	page->index = offset;
 
 	if (!huge) {
-		error = mem_cgroup_charge(page, current->mm, gfp_mask, false);
+		error = mem_cgroup_charge(page, current->mm, gfp_mask);
 		if (error)
 			goto error;
 	}
mm/huge_memory.c  +1 −1
@@ -593,7 +593,7 @@ static vm_fault_t __do_huge_pmd_anonymous_page(struct vm_fault *vmf,
 
 	VM_BUG_ON_PAGE(!PageCompound(page), page);
 
-	if (mem_cgroup_charge(page, vma->vm_mm, gfp, false)) {
+	if (mem_cgroup_charge(page, vma->vm_mm, gfp)) {
 		put_page(page);
 		count_vm_event(THP_FAULT_FALLBACK);
 		count_vm_event(THP_FAULT_FALLBACK_CHARGE);
mm/khugepaged.c  +2 −2
@@ -1059,7 +1059,7 @@ static void collapse_huge_page(struct mm_struct *mm,
 		goto out_nolock;
 	}
 
-	if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) {
+	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out_nolock;
 	}
@@ -1632,7 +1632,7 @@ static void collapse_file(struct mm_struct *mm,
 		goto out;
 	}
 
-	if (unlikely(mem_cgroup_charge(new_page, mm, gfp, false))) {
+	if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
 		result = SCAN_CGROUP_CHARGE_FAIL;
 		goto out;
 	}